Skip to content

Instantly share code, notes, and snippets.

@taking
Last active March 25, 2021 08:29
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save taking/843f355679166aed024d00770f7400c7 to your computer and use it in GitHub Desktop.
# --- Tool installation ---------------------------------------------------
# subctl (Submariner CLI): the get.submariner.io installer drops the binary
# into ~/.local/bin, so move it onto the system PATH afterwards.
# FIX: chained each install's steps with && so a failed download no longer
# lets the follow-up rm/chmod/mv run against missing files.
curl -Ls https://get.submariner.io | bash &&
cp ~/.local/bin/subctl /usr/bin &&
rm -rf ~/.local/bin/
subctl version

# calicoctl (Calico CLI) pinned to v3.18.1 to match the manifests below.
curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v3.18.1/calicoctl &&
chmod +x calicoctl &&
mv calicoctl /usr/bin

# kubectx/kubens helpers for fast context/namespace switching.
git clone https://github.com/ahmetb/kubectx &&
cp -r kubectx/kube* /usr/bin/ &&
rm -rf ./kubectx
# First IPv4 address reported by the host; added as an extra SAN on the
# apiserver certificate so clients can reach the node by its internal IP.
# FIX: awk program quoted conventionally ('{print $1}' instead of {'print $1'}).
my_vm_internal_ip="$(hostname -I | awk '{print $1}')"
echo 'Network Add-on is [Calico]'
echo 'Calico Applying...'
# Per-cluster pod/service CIDRs and public IPs. Ranges must not overlap
# across clusters for Submariner cross-cluster routing to work.
# broker
calico_pod_cidr="10.240.0.0/16"
calico_network_cidr="10.110.0.0/16"
instance_public_ip="118.130.73.14"
# cluster 1
calico_pod_cidr1="10.241.0.0/16"
calico_network_cidr1="10.111.0.0/16"
instance_public_ip1="35.184.115.14"
# cluster 2
calico_pod_cidr2="10.242.0.0/16"
calico_network_cidr2="10.112.0.0/16"
instance_public_ip2="34.72.122.246"
# cluster 3
calico_pod_cidr3="10.243.0.0/16"
calico_network_cidr3="10.113.0.0/16"
instance_public_ip3="3.141.43.216"
# Bootstrap the control plane, then install the admin kubeconfig for the
# current user. Steps are chained so a kubeadm failure aborts the rest.
# FIX: quoted all expansions; added the missing space before '&&' on the
# kubeadm line.
kubeadm init --v=5 --pod-network-cidr="${calico_pod_cidr}" --service-cidr="${calico_network_cidr}" --apiserver-cert-extra-sans "${my_vm_internal_ip},${instance_public_ip}" &&
mkdir -p "$HOME/.kube" &&
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config" &&
chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# broker: rename the freshly-initialised cluster/context to submariner-broker
iptables -P FORWARD ACCEPT
# Allow workloads to schedule on the (single) control-plane node.
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-broker" in
# the in-cluster kubeadm-config ConfigMap and in the local kubeconfig.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-broker/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-broker' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-broker' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-broker &&
sed -i 's/ name: kubernetes/ name: submariner-broker/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-broker/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner.
# FIX: added the '&&' joining the two commands, matching the cluster 1-3
# sections, so the binding is only created after the account exists.
kubectl -n kube-system create serviceaccount submariner-broker &&
kubectl create clusterrolebinding submariner-broker \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-broker
# cluster 1
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-1" both in
# the in-cluster kubeadm-config ConfigMap and in the local kubeconfig, then
# rename the context to match.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-1/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-1' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-1' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-1 &&
sed -i 's/ name: kubernetes/ name: submariner-1/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-1/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-1 &&
kubectl create clusterrolebinding submariner-1 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-1
# cluster 2
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-2" both in
# the in-cluster kubeadm-config ConfigMap and in the local kubeconfig, then
# rename the context to match.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-2/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-2' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-2' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-2 &&
sed -i 's/ name: kubernetes/ name: submariner-2/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-2/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-2 &&
kubectl create clusterrolebinding submariner-2 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-2
# cluster 3
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-3".
# FIX: the two set-context lines were missing the trailing '&&' that every
# other cluster section has, silently breaking the command chain.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-3/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-3' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-3' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-3 &&
sed -i 's/ name: kubernetes/ name: submariner-3/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-3/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-3 &&
kubectl create clusterrolebinding submariner-3 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-3
# broker
# This section lets the broker manage every cluster: merge config0..config3
# (one kubeconfig per cluster, expected in the current directory) into one
# flattened file, then make it the active kubeconfig.
# FIX: the merged file was written to $PWD/merge_config but then copied from
# ~/.kube/merge_config — write it to ~/.kube so the cp source exists.
KUBECONFIG=config0:config1:config2:config3 kubectl config view --flatten > ~/.kube/merge_config &&
yes | cp ~/.kube/merge_config ~/.kube/config &&
kubectl config view
# Install the Tigera (Calico) operator on every cluster known to kubectx.
# FIX: echo "...\n" printed a literal backslash-n — use printf; quote vars.
for cluster in $(kubectx); do
  kubectx "$cluster"
  printf 'Installing cluster: %s .........\n' "$cluster"
  kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
done
# Remove the stock Calico custom-resources manifest from every cluster so
# the per-cluster Installation CRs below can be applied instead.
# FIX: message said "Installing" while the command deletes; echo "...\n"
# printed a literal backslash-n — use printf; quote vars.
for cluster in $(kubectx); do
  kubectx "$cluster"
  printf 'Removing default Calico custom resources on cluster: %s\n' "$cluster"
  kubectl delete -f https://docs.projectcalico.org/manifests/custom-resources.yaml
done
# Calico Installation CR for the broker cluster: pod pool 10.240.0.0/16
# (matches calico_pod_cidr), /26 blocks, VXLAN only across subnets, SNAT on
# egress.
# FIX: restored the YAML indentation — the flattened heredoc was not valid
# YAML (metadata/spec children must be nested under their parent keys).
kubectl --context submariner-broker create -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.240.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
EOF
# Calico Installation CR for cluster 1: pod pool 10.241.0.0/16 (matches
# calico_pod_cidr1).
# FIX: restored the YAML indentation — the flattened heredoc was not valid
# YAML (metadata/spec children must be nested under their parent keys).
kubectl --context submariner-1 create -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.241.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
EOF
# Calico Installation CR for cluster 2: pod pool 10.242.0.0/16 (matches
# calico_pod_cidr2).
# FIX: restored the YAML indentation — the flattened heredoc was not valid
# YAML (metadata/spec children must be nested under their parent keys).
kubectl --context submariner-2 create -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.242.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
EOF
# Calico Installation CR for cluster 3: pod pool 10.243.0.0/16 (matches
# calico_pod_cidr3).
# FIX: restored the YAML indentation — the flattened heredoc was not valid
# YAML (metadata/spec children must be nested under their parent keys).
kubectl --context submariner-3 create -f - <<EOF
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    ipPools:
    - blockSize: 26
      cidr: 10.243.0.0/16
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
EOF
# Register the OTHER clusters' service/pod CIDRs on cluster 1 as disabled
# IPPools so Calico neither NATs nor allocates from those ranges (required
# for Submariner to route cross-cluster traffic).
# NOTE(review): calicoctl has no kubectl-style --context flag — verify this
# invocation; it may need KUBECONFIG/DATASTORE_TYPE configuration instead.
# FIX: restored the YAML indentation lost in the flattened heredoc.
calicoctl --context submariner-1 create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c2
spec:
  cidr: 10.112.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c2
spec:
  cidr: 10.242.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c3
spec:
  cidr: 10.113.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c3
spec:
  cidr: 10.243.0.0/16
  natOutgoing: false
  disabled: true
EOF
# Register the OTHER clusters' service/pod CIDRs on cluster 2 as disabled
# IPPools (no NAT, no allocation) for Submariner cross-cluster routing.
# NOTE(review): calicoctl has no kubectl-style --context flag — verify this
# invocation; it may need KUBECONFIG/DATASTORE_TYPE configuration instead.
# FIX: restored the YAML indentation lost in the flattened heredoc.
calicoctl --context submariner-2 create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c1
spec:
  cidr: 10.111.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c1
spec:
  cidr: 10.241.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c3
spec:
  cidr: 10.113.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c3
spec:
  cidr: 10.243.0.0/16
  natOutgoing: false
  disabled: true
EOF
# Register the OTHER clusters' service/pod CIDRs on cluster 3 as disabled
# IPPools (no NAT, no allocation) for Submariner cross-cluster routing.
# NOTE(review): calicoctl has no kubectl-style --context flag — verify this
# invocation; it may need KUBECONFIG/DATASTORE_TYPE configuration instead.
# FIX: restored the YAML indentation lost in the flattened heredoc.
calicoctl --context submariner-3 create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c1
spec:
  cidr: 10.111.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c1
spec:
  cidr: 10.241.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: svc-c2
spec:
  cidr: 10.112.0.0/16
  natOutgoing: false
  disabled: true
---
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pod-c2
spec:
  cidr: 10.242.0.0/16
  natOutgoing: false
  disabled: true
EOF
# broker
# Deploy the Submariner broker (with service discovery / Lighthouse) onto
# the broker cluster; this writes broker-info.subm into the current dir.
subctl deploy-broker --kubecontext submariner-broker --service-discovery
# cluster 1
# Label the node as the Submariner gateway, then join the cluster to the
# broker using the broker-info.subm generated above.
kubectl --context submariner-1 label nodes submariner-1 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-1 broker-info.subm --clusterid submariner-1
# cluster 2
kubectl --context submariner-2 label nodes submariner-2 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-2 broker-info.subm --clusterid submariner-2
# cluster 3
kubectl --context submariner-3 label nodes submariner-3 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-3 broker-info.subm --clusterid submariner-3
# NOTE(review): no --context here, so this describes Gateways in whichever
# context is currently active — confirm that is intended.
kubectl -n submariner-operator describe Gateway
# broker
# Confirm every cluster registered itself with the broker.
kubectl --context=submariner-broker -n submariner-k8s-broker get clusters.submariner.io
# cluster
# Per-cluster view of Submariner Cluster/Endpoint resources.
# FIX: echo "...\n" printed a literal backslash-n — use printf; quote vars.
for cluster in $(kubectx); do
  kubectx "$cluster"
  printf 'cluster: %s .........\n' "$cluster"
  kubectl --context "$cluster" get clusters.submariner.io -n submariner-operator
  kubectl --context "$cluster" get endpoints.submariner.io -n submariner-operator
done
# --- Flannel variant: tool installation ----------------------------------
# Same tooling as the Calico flow: subctl plus kubectx/kubens.
# FIX: chained each install's steps with && so a failed download no longer
# lets the follow-up rm/cp run against missing files; awk program quoted
# conventionally.
curl -Ls https://get.submariner.io | bash &&
cp ~/.local/bin/subctl /usr/bin &&
rm -rf ~/.local/bin/
subctl version
git clone https://github.com/ahmetb/kubectx &&
cp -r kubectx/kube* /usr/bin/ &&
rm -rf ./kubectx
# First IPv4 address reported by the host; used as an apiserver cert SAN.
my_vm_internal_ip="$(hostname -I | awk '{print $1}')"
echo 'Network Add-on is [Flannel]'
echo 'Flannel Applying...'
# Per-cluster pod/service CIDRs and public IPs; ranges must not overlap
# across clusters for Submariner cross-cluster routing.
# broker
pod_network_cidr="10.240.0.0/16"
service_cidr="10.110.0.0/16"
instance_public_ip="118.130.73.14"
# cluster 1
pod_network_cidr1="10.241.0.0/16"
service_cidr1="10.111.0.0/16"
instance_public_ip1="35.184.115.14"
# cluster 2
pod_network_cidr2="10.242.0.0/16"
service_cidr2="10.112.0.0/16"
instance_public_ip2="34.72.122.246"
# cluster 3
pod_network_cidr3="10.243.0.0/16"
service_cidr3="10.113.0.0/16"
instance_public_ip3="3.141.43.216"
# Bootstrap the control plane, then install the admin kubeconfig.
# FIX: quoted all expansions ($HOME, $(id ...), CIDR vars).
kubeadm init --pod-network-cidr="${pod_network_cidr}" --service-cidr="${service_cidr}" --apiserver-cert-extra-sans "${my_vm_internal_ip},${instance_public_ip}" &&
mkdir -p "$HOME/.kube" &&
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config" &&
chown "$(id -u):$(id -g)" "$HOME/.kube/config"
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-broker/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-broker' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-broker' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-broker &&
sed -i 's/ name: kubernetes/ name: submariner-broker/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-broker/g' ~/.kube/config &&
kubectl get nodes
kubectl -n kube-system create serviceaccount submariner-broker
kubectl create clusterrolebinding submariner-broker \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-broker
# cluster 1
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-1" both in
# the in-cluster kubeadm-config ConfigMap and in the local kubeconfig, then
# rename the context to match.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-1/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-1' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-1' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-1 &&
sed -i 's/ name: kubernetes/ name: submariner-1/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-1/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-1 &&
kubectl create clusterrolebinding submariner-1 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-1
# cluster 2
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-2" both in
# the in-cluster kubeadm-config ConfigMap and in the local kubeconfig, then
# rename the context to match.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-2/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-2' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-2' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-2 &&
sed -i 's/ name: kubernetes/ name: submariner-2/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-2/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-2 &&
kubectl create clusterrolebinding submariner-2 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-2
# cluster 3
# Open packet forwarding and allow scheduling on the control-plane node.
iptables -P FORWARD ACCEPT
kubectl taint nodes --all node-role.kubernetes.io/master-
# Rename the kubeadm-default cluster "kubernetes" to "submariner-3".
# FIX: the two set-context lines were missing the trailing '&&' that every
# other cluster section has, silently breaking the command chain.
kubectl get configmaps -n kube-system kubeadm-config -o yaml | sed 's/ clusterName: kubernetes/ clusterName: submariner-3/g' | kubectl replace -f - &&
kubectl config set-context kubernetes-admin@kubernetes --cluster='submariner-3' &&
kubectl config set-context kubernetes-admin@kubernetes --user='submariner-3' &&
kubectl config rename-context kubernetes-admin@kubernetes submariner-3 &&
sed -i 's/ name: kubernetes/ name: submariner-3/g' ~/.kube/config &&
sed -i 's/- name: kubernetes-admin/- name: submariner-3/g' ~/.kube/config &&
kubectl get nodes
# Service account with cluster-admin rights used by Submariner components.
kubectl -n kube-system create serviceaccount submariner-3 &&
kubectl create clusterrolebinding submariner-3 \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:submariner-3
# broker
# This section lets the broker manage every cluster: merge config0..config3
# (one kubeconfig per cluster, expected in the current directory) into one
# flattened file, then make it the active kubeconfig.
# FIX: the merged file was written to $PWD/merge_config but then copied from
# ~/.kube/merge_config — write it to ~/.kube so the cp source exists.
KUBECONFIG=config0:config1:config2:config3 kubectl config view --flatten > ~/.kube/merge_config &&
yes | cp ~/.kube/merge_config ~/.kube/config &&
kubectl config view
# Install Flannel on every cluster, using each cluster's own pod CIDR.
# (The original left a note that the hard-coded ${pod_network_cidr} needed
# fixing — every cluster was getting the broker's CIDR. The case below maps
# each kubectx context to its CIDR variable instead.)
# FIX: the sed edited fixed line 128 of kube-flannel.yml, which breaks the
# moment upstream shifts a line — replace Flannel's default pod network
# (10.244.0.0/16) by pattern instead. Also printf instead of echo "...\n".
for cluster in $(kubectx); do
  kubectx "$cluster"
  printf 'Installing cluster: %s .........\n' "$cluster"
  # Pick the pod CIDR this cluster was kubeadm-init'ed with.
  case "$cluster" in
    submariner-broker) cidr="$pod_network_cidr" ;;
    submariner-1)      cidr="$pod_network_cidr1" ;;
    submariner-2)      cidr="$pod_network_cidr2" ;;
    submariner-3)      cidr="$pod_network_cidr3" ;;
    *) printf 'unknown cluster: %s — skipping\n' "$cluster" >&2; continue ;;
  esac
  wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -O ./kube-flannel.yml
  sed -i "s|10.244.0.0/16|${cidr}|g" kube-flannel.yml
  kubectl apply -f kube-flannel.yml
  echo '[flannel] Success'
done
# broker
# Deploy the Submariner broker (with service discovery / Lighthouse) onto
# the broker cluster; this writes broker-info.subm into the current dir.
subctl deploy-broker --kubecontext submariner-broker --service-discovery
# cluster 1
# Label the node as the Submariner gateway, then join the cluster to the
# broker using the broker-info.subm generated above.
kubectl --context submariner-1 label nodes submariner-1 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-1 broker-info.subm --clusterid submariner-1
# cluster 2
kubectl --context submariner-2 label nodes submariner-2 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-2 broker-info.subm --clusterid submariner-2
# cluster 3
kubectl --context submariner-3 label nodes submariner-3 submariner.io/gateway=true --overwrite &&
subctl join --kubecontext submariner-3 broker-info.subm --clusterid submariner-3
# NOTE(review): no --context here, so this describes Gateways in whichever
# context is currently active — confirm that is intended.
kubectl -n submariner-operator describe Gateway
# broker
# Confirm every cluster registered itself with the broker.
kubectl --context=submariner-broker -n submariner-k8s-broker get clusters.submariner.io
# cluster
# Per-cluster view of Submariner Cluster/Endpoint resources.
# FIX: echo "...\n" printed a literal backslash-n — use printf; quote vars.
for cluster in $(kubectx); do
  kubectx "$cluster"
  printf 'cluster: %s .........\n' "$cluster"
  kubectl --context "$cluster" get clusters.submariner.io -n submariner-operator
  kubectl --context "$cluster" get endpoints.submariner.io -n submariner-operator
done
@taking
Copy link
Author

taking commented Mar 23, 2021

기본 nginx를 deployment 하여, pod,svc 생성

kubectl --context submariner-1 create deployment nginx --image=nginx
kubectl --context submariner-1 expose deployment nginx --port=80

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment