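## Cluster with Cloud Run for Anthos (CRfA) and an external load balancer
# Note: $zone, $projectId and the project itself come from the "Setup" section at the end of this gist.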
clusterName=crfa-external
gcloud container clusters create $clusterName \
--zone=$zone \
--addons=HttpLoadBalancing,CloudRun \
--machine-type=n1-standard-2 \
--num-nodes=3 \
--enable-stackdriver-kubernetes
gcloud container clusters get-credentials $clusterName \
--zone $zone
elbIp=$(kubectl get svc istio-ingress -n gke-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}")
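# In the config-domain ConfigMap, each data key is a domain; an empty value ("") makes it the default
# for all services, and setting "example.com" to null removes that entry.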
cat <<EOF > patch.json
{"data": {"example.com": null, "$elbIp.nip.io": ""}}
EOF
kubectl patch configmap config-domain --namespace knative-serving \
--type=merge --patch "$(cat patch.json)"
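# Optionally, check that the patch was applied:
kubectl get configmap config-domain -n knative-serving -o yaml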
gcloud run deploy internal \
--image gcr.io/knative-samples/helloworld-go \
--cluster=$clusterName \
--cluster-location=$zone \
--platform gke \
--project $projectId \
--set-env-vars TARGET=internal \
--ingress internal
gcloud run deploy external \
--image gcr.io/knative-samples/helloworld-go \
--cluster=$clusterName \
--cluster-location=$zone \
--platform gke \
--project $projectId \
--set-env-vars TARGET=external \
--ingress all
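# A quick check of the external service (a sketch, assuming the default Knative domain template
# <service>.<namespace>.<domain> and plain-HTTP access):
curl http://external.default.$elbIp.nip.io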
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: internal-lb
  namespace: gke-system
  annotations:
    cloud.google.com/load-balancer-type: "Internal"
spec:
  type: LoadBalancer
  selector:
    istio: ingress-gke-system
  ports:
  - name: http2
    port: 80
    targetPort: 8081
EOF
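# The ILB IP can take a minute or two to be assigned; a small wait loop (sketch):
until kubectl get svc internal-lb -n gke-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}" | grep -q .; do
  sleep 5
done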
# There is currently an issue with CRfA: for the internal service, you need to replace this label:
# serving.knative.dev/visibility: cluster-local
# with this one:
# networking.knative.dev/visibility: cluster-local
# Otherwise, the internal service is accessible publicly.
# Here is the command to do this interactively:
kubectl edit service.serving.knative.dev/internal
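# A non-interactive alternative (a sketch, assuming the visibility key is a label on the Knative Service):
kubectl label service.serving.knative.dev/internal networking.knative.dev/visibility=cluster-local --overwrite
kubectl label service.serving.knative.dev/internal serving.knative.dev/visibility-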
## Test
# After that fix, this command returns a 200 HTTP status code (before the fix it was a 404), provided you have access to the VPC hosting the ILB's IP address:
ilbIp=$(kubectl get svc internal-lb -n gke-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}")
curl $ilbIp -H "Host: internal.default.svc.cluster.local"
# Custom domain mapping with this ILB for those internal services
export CUSTOM_DOMAIN=custom-domain.example.com
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ${CUSTOM_DOMAIN}
  namespace: default
spec:
  # This is the gateway shared by the Knative service mesh.
  gateways:
  - knative-serving/knative-local-gateway
  # Set hosts to the domain name that you own.
  hosts:
  - ${CUSTOM_DOMAIN}
  http:
  - headers:
      request:
        add:
          K-Original-Host: ${CUSTOM_DOMAIN}
    rewrite:
      authority: internal.default.svc.cluster.local
    route:
    - destination:
        host: knative-local-gateway.gke-system.svc.cluster.local
        port:
          number: 80
      weight: 100
EOF
# From a machine in the same VPC, you can now successfully run:
curl $ilbIp -H "Host: ${CUSTOM_DOMAIN}"
# Conclusion: all the internal Knative services (exposed via the istio-ingress by CRfA) are accessible via the ILB.
#### Support HTTPS
# We will use a self-signed certificate as an experiment.
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -subj '/O=example Inc./CN=example.com' -keyout example.com.key -out example.com.crt
openssl req -out ${CUSTOM_DOMAIN}.csr -newkey rsa:2048 -nodes -keyout ${CUSTOM_DOMAIN}.key -subj "/CN=${CUSTOM_DOMAIN}/O=httpbin organization"
openssl x509 -req -days 365 -CA example.com.crt -CAkey example.com.key -set_serial 0 -in ${CUSTOM_DOMAIN}.csr -out ${CUSTOM_DOMAIN}.crt
kubectl create -n gke-system secret tls custom-domain-credential --key=${CUSTOM_DOMAIN}.key --cert=${CUSTOM_DOMAIN}.crt
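# Optionally verify that the certificate chains back to the self-signed CA:
openssl verify -CAfile example.com.crt ${CUSTOM_DOMAIN}.crt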
# Update the internal-lb Service to also expose HTTPS
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: internal-lb
  namespace: gke-system
  annotations:
    cloud.google.com/load-balancer-type: "Internal"
spec:
  type: LoadBalancer
  selector:
    istio: ingress-gke-system
  ports:
  - name: http2
    port: 80
    targetPort: 8081
  - name: https
    port: 443
    targetPort: 443
EOF
# Create a gateway to support HTTPS
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: internal-lb-gateway
  namespace: knative-serving
spec:
  selector:
    istio: ingress-gke-system
  servers:
  - hosts:
    - ${CUSTOM_DOMAIN}
    port:
      number: 443
      name: https
      protocol: HTTPS
    tls:
      mode: SIMPLE
      credentialName: custom-domain-credential # must match the name of the TLS secret
EOF
# Adjust the VirtualService to bind to internal-lb-gateway
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ${CUSTOM_DOMAIN}
  namespace: default
spec:
  # Bind to the dedicated internal-lb-gateway instead of the shared local gateway.
  gateways:
  - knative-serving/internal-lb-gateway
  # Set hosts to the domain name that you own.
  hosts:
  - ${CUSTOM_DOMAIN}
  http:
  - headers:
      request:
        add:
          K-Original-Host: ${CUSTOM_DOMAIN}
    rewrite:
      authority: internal.default.svc.cluster.local
    route:
    - destination:
        host: knative-local-gateway.gke-system.svc.cluster.local
        port:
          number: 80
      weight: 100
EOF
# From a machine in the same VPC, run:
curl https://${CUSTOM_DOMAIN} --resolve "${CUSTOM_DOMAIN}:443:${ilbIp}" -k
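# Since we hold the CA certificate, we can also validate the TLS handshake instead of passing -k (sketch):
curl https://${CUSTOM_DOMAIN} --resolve "${CUSTOM_DOMAIN}:443:${ilbIp}" --cacert example.com.crt
## Cluster with Cloud Run for Anthos (CRfA) and an internal load balancer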
clusterName=crfa-internal
gcloud container clusters create $clusterName \
--zone=$zone \
--addons=HttpLoadBalancing,CloudRun \
--machine-type=n1-standard-2 \
--num-nodes=3 \
--enable-stackdriver-kubernetes \
--cloud-run-config=load-balancer-type=INTERNAL
gcloud container clusters get-credentials $clusterName \
--zone $zone
kubectl get svc istio-ingress -n gke-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}"
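# Replace FIXME below with the IP address printed by the previous command.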
kubectl patch configmap config-domain --namespace knative-serving --patch \
'{"data": {"example.com": null, "FIXME.nip.io": ""}}'
gcloud run deploy internal \
--image gcr.io/knative-samples/helloworld-go \
--cluster=$clusterName \
--cluster-location=$zone \
--platform gke \
--project $projectId \
--set-env-vars TARGET=internal \
--ingress internal
gcloud run deploy external \
--image gcr.io/knative-samples/helloworld-go \
--cluster=$clusterName \
--cluster-location=$zone \
--platform gke \
--project $projectId \
--set-env-vars TARGET=external \
--ingress all
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: public-lb
  namespace: gke-system
spec:
  type: LoadBalancer
  selector:
    istio: ingress-gke-system
  ports:
  - name: http2
    port: 80
    targetPort: 8081
EOF
# There is currently an issue with CRfA: for the internal service, you need to replace this label:
# serving.knative.dev/visibility: cluster-local
# with this one:
# networking.knative.dev/visibility: cluster-local
# Otherwise, the internal service is accessible publicly.
# Here is the command to do this interactively (or use the non-interactive kubectl label alternative shown earlier):
kubectl edit service.serving.knative.dev/internal
## Test
# After that fix, these commands return a 200 HTTP status code (before the fix, it was a 404):
publicLbIp=$(kubectl get svc public-lb -n gke-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}")
curl $publicLbIp -H "Host: internal.default.svc.cluster.local"
curl $publicLbIp -H "Host: external.default.svc.cluster.local"
# Conclusion: both internal and external Knative services (exposed via the istio-ingress by CRfA) are accessible via the public LB, which is not the intent, so this setup is not acceptable.
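## Setup: project, billing and jumpbox VM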
projectId=mycrfa
projectName=mycrfa
folderId=FIXME
zone=us-east4-a
billingAccountId=FIXME
vmName=jumpbox
gcloud projects create $projectId \
--folder $folderId \
--name $projectName
gcloud config set project $projectId
gcloud beta billing projects link $projectId \
--billing-account $billingAccountId
gcloud services enable container.googleapis.com
gcloud compute instances create $vmName --zone $zone
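# The jumpbox VM lives in the project's VPC; SSH into it to run the ILB curl tests above (sketch):
gcloud compute ssh $vmName --zone $zone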