Create 2 clusters:
proj=<proj name from gcloud projects list>
cluster="cluster-1"
zone="us-east1-b"
gcloud beta container --project $proj clusters create $cluster --zone $zone --username "admin" \
--cluster-version "1.9.6-gke.1" --machine-type "n1-standard-2" --image-type "COS" --disk-size "100" \
--scopes "https://www.googleapis.com/auth/compute","https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append"\
--num-nodes "4" --network "default" --enable-cloud-logging --enable-cloud-monitoring --enable-ip-alias --async
cluster="cluster-2"
gcloud beta container --project $proj clusters create $cluster --zone $zone --username "admin" \
--cluster-version "1.9.6-gke.1" --machine-type "n1-standard-2" --image-type "COS" --disk-size "100" \
--scopes "https://www.googleapis.com/auth/compute","https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append"\
--num-nodes "4" --network "default" --enable-cloud-logging --enable-cloud-monitoring --enable-ip-alias --async
Check until both clusters show status RUNNING:
gcloud container clusters list
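To poll instead of re-running the list by hand, a minimal loop (assumes both clusters report RUNNING when ready and no other clusters live in this project):
while gcloud container clusters list --format='value(status)' | grep -qv RUNNING; do
  echo "waiting for clusters..."; sleep 30
done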
Get credentials (both stored in .kube/config by default):
gcloud container clusters get-credentials cluster-1 --zone $zone --project $proj
gcloud container clusters get-credentials cluster-2 --zone $zone --project $proj
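Note: gcloud names these contexts gke_<project>_<zone>_<cluster> by default; the short context names used below assume a rename, e.g.:
kubectl config rename-context "gke_${proj}_${zone}_cluster-1" cluster-1
kubectl config rename-context "gke_${proj}_${zone}_cluster-2" cluster-2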
Set your GCP user email as KUBE_USER:
KUBE_USER=tiswanso@cisco.com
Create clusterrolebinding to cluster-admin for your user:
kubectl create clusterrolebinding prow-cluster-admin-binding \
--clusterrole=cluster-admin \
--user="${KUBE_USER}"
Set firewall rule for pod CIDRs between clusters' nodes:
function join_by { local IFS="$1"; shift; echo "$*"; }
ALL_CLUSTER_CIDRS=$(gcloud container clusters list --format='value(clusterIpv4Cidr)' | sort | uniq)
ALL_CLUSTER_CIDRS=$(join_by , ${ALL_CLUSTER_CIDRS})
ALL_CLUSTER_NETTAGS=$(gcloud compute instances list --format='value(tags.items.[0])' | sort | uniq)
ALL_CLUSTER_NETTAGS=$(join_by , ${ALL_CLUSTER_NETTAGS})
gcloud compute firewall-rules create istio-multicluster-test-pods \
--allow=tcp,udp,icmp,esp,ah,sctp \
--direction=INGRESS \
--priority=900 \
--source-ranges="${ALL_CLUSTER_CIDRS}" \
--target-tags="${ALL_CLUSTER_NETTAGS}" --quiet
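To confirm the rule picked up both clusters' pod CIDRs and node tags:
gcloud compute firewall-rules describe istio-multicluster-test-pods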
Install Istio on the main cluster:
kubectl config get-contexts
- find the context name for the cluster you want as the main cluster (the one running the Istio control plane), e.g. cluster-1
kubectl config use-context cluster-1
HUB=docker.io/istionightly
TAG=nightly-release-1.0
mkdir -p ~/tmp
helm template install/kubernetes/helm/istio --name istio --namespace istio-system --set global.hub=$HUB --set global.tag=$TAG > ~/tmp/istio_master.yaml
kubectl apply -f ~/tmp/istio_master.yaml
kubectl label namespace default istio-injection=enabled
Wait for the pods to come up:
kubectl get pods -n istio-system
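A minimal polling sketch, assuming every pod eventually reaches Running or Completed:
while kubectl get pods -n istio-system --no-headers | grep -qvE 'Running|Completed'; do
  echo "waiting for istio-system pods..."; sleep 10
done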
Get the control-plane pod IPs (with --enable-ip-alias these are directly routable from the remote cluster, which is why the remote can point at them):
export PILOT_POD_IP=$(kubectl -n istio-system get pod -l istio=pilot -o jsonpath='{.items[0].status.podIP}')
export POLICY_POD_IP=$(kubectl -n istio-system get pod -l istio=mixer -o jsonpath='{.items[0].status.podIP}')
export STATSD_POD_IP=$(kubectl -n istio-system get pod -l istio=statsd-prom-bridge -o jsonpath='{.items[0].status.podIP}')
export TELEMETRY_POD_IP=$(kubectl -n istio-system get pod -l istio-mixer-type=telemetry -o jsonpath='{.items[0].status.podIP}')
export ZIPKIN_POD_IP=$(kubectl -n istio-system get pod -l app=jaeger -o jsonpath='{range .items[*]}{.status.podIP}{end}')
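Sanity-check that each variable is non-empty; an empty value means the label selector matched no pod:
echo "pilot=${PILOT_POD_IP} policy=${POLICY_POD_IP} statsd=${STATSD_POD_IP} telemetry=${TELEMETRY_POD_IP} zipkin=${ZIPKIN_POD_IP}"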
Generate remote cluster manifest:
helm template install/kubernetes/helm/istio-remote --namespace istio-system \
--name istio-remote \
--set global.remotePilotAddress=${PILOT_POD_IP} \
--set global.remotePolicyAddress=${POLICY_POD_IP} \
--set global.remoteTelemetryAddress=${TELEMETRY_POD_IP} \
--set global.proxy.envoyStatsd.enabled=true \
--set global.proxy.envoyStatsd.host=${STATSD_POD_IP} \
${ZIPKIN_POD_IP:+ --set global.remoteZipkinAddress=${ZIPKIN_POD_IP}} > ~/tmp/istio-remote.yaml
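A quick sanity check that the pod IPs made it into the generated manifest:
grep -E "${PILOT_POD_IP}|${TELEMETRY_POD_IP}" ~/tmp/istio-remote.yaml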
Switch to the remote cluster and apply:
kubectl config use-context cluster-2
kubectl create ns istio-system
kubectl apply -f ~/tmp/istio-remote.yaml
kubectl label namespace default istio-injection=enabled
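Only a trimmed-down set of components runs on the remote (pilot, mixer, and tracing stay on cluster-1); see what came up with:
kubectl get pods -n istio-system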
Create a kubeconfig file for the main cluster's pilot to access the remote (run this while the current context is still cluster-2):
export WORK_DIR=$(pwd)
CLUSTER_NAME=$(kubectl config view --minify=true -o "jsonpath={.clusters[].name}")
export KUBECFG_FILE=${WORK_DIR}/${CLUSTER_NAME}
SERVER=$(kubectl config view --minify=true -o "jsonpath={.clusters[].cluster.server}")
NAMESPACE=istio-system
SERVICE_ACCOUNT=istio-multi
SECRET_NAME=$(kubectl get sa ${SERVICE_ACCOUNT} -n ${NAMESPACE} -o jsonpath='{.secrets[].name}')
CA_DATA=$(kubectl get secret ${SECRET_NAME} -n ${NAMESPACE} -o "jsonpath={.data['ca\.crt']}")
TOKEN=$(kubectl get secret ${SECRET_NAME} -n ${NAMESPACE} -o "jsonpath={.data['token']}" | base64 --decode)
cat <<EOF > ${KUBECFG_FILE}
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ${CA_DATA}
    server: ${SERVER}
  name: ${CLUSTER_NAME}
contexts:
- context:
    cluster: ${CLUSTER_NAME}
    user: ${CLUSTER_NAME}
  name: ${CLUSTER_NAME}
current-context: ${CLUSTER_NAME}
kind: Config
preferences: {}
users:
- name: ${CLUSTER_NAME}
  user:
    token: ${TOKEN}
EOF
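Before wrapping this file in a secret, it's worth checking that the token authenticates against the remote API server (assumes the istio-multi service account is allowed to list pods):
kubectl --kubeconfig="${KUBECFG_FILE}" get pods -n ${NAMESPACE}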
Create remote cluster secret on main cluster:
kubectl config use-context cluster-1
kubectl create secret generic ${CLUSTER_NAME} --from-file ${KUBECFG_FILE} -n ${NAMESPACE}
kubectl label secret ${CLUSTER_NAME} istio/multiCluster=true -n ${NAMESPACE}
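Pilot discovers remote clusters through secrets carrying this label; confirm it is in place:
kubectl get secrets -n ${NAMESPACE} -l istio/multiCluster=true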
----------------------------------
Bookinfo setup
----------------------------------
kubectl config use-context cluster-1
kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml
kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml
Delete the reviews-v3 deployment from cluster-1:
kubectl delete deployment reviews-v3
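Only reviews-v1 and reviews-v2 pods should remain on cluster-1:
kubectl get pods -l app=reviews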
Add reviews-v3 to remote:
kubectl config use-context cluster-2
cat <<EOF > ~/tmp/reviews-v3.yaml
---
##################################################################################################
# Ratings service
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: ratings
  labels:
    app: ratings
spec:
  ports:
  - port: 9080
    name: http
---
##################################################################################################
# Reviews service
##################################################################################################
apiVersion: v1
kind: Service
metadata:
  name: reviews
  labels:
    app: reviews
spec:
  ports:
  - port: 9080
    name: http
  selector:
    app: reviews
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: reviews-v3
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: reviews
        version: v3
    spec:
      containers:
      - name: reviews
        image: istio/examples-bookinfo-reviews-v3:1.5.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9080
EOF
kubectl apply -f ~/tmp/reviews-v3.yaml
You should be able to access http://<ingress-gateway IP>/productpage and see the reviews alternate between no stars (v1), black stars (v2), and red stars (v3), with v3 served from the remote cluster.
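To find the ingress gateway IP on cluster-1 (a LoadBalancer service on GKE) and spot-check the page, something like:
kubectl config use-context cluster-1
GATEWAY_IP=$(kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl -s -o /dev/null -w "%{http_code}\n" "http://${GATEWAY_IP}/productpage"
Refreshing /productpage several times should cycle through the reviews versions.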