Skip to content

Instantly share code, notes, and snippets.

@v1k0d3n
Last active May 25, 2017 17:14
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save v1k0d3n/a38cc3febb53819e074dc322b9473f2f to your computer and use it in GitHub Desktop.
# Fetch the pinned bootkube helper scripts from the gist, make them
# executable, and kick off the cluster bring-up.
CLEAN_URL='https://gist.githubusercontent.com/v1k0d3n/a38cc3febb53819e074dc322b9473f2f/raw/7234224ba34fd2891a29cbe544637d23f931db89/bootkube-clean.sh'
UP_URL='https://gist.githubusercontent.com/v1k0d3n/a38cc3febb53819e074dc322b9473f2f/raw/057243cf033f44ce4b4a053d341c5b1723eabcab/bootkube-up.sh'
wget "$CLEAN_URL"
wget "$UP_URL"
chmod +x bootkube-*
./bootkube-up.sh
#!/bin/bash
# bootkube-clean.sh: tear down a bootkube-deployed Kubernetes node.
# Stops the kubelet, removes all k8s/bootkube containers, then deletes
# cluster state directories, lock files, installed binaries and the
# flannel SDN interface. All output is suppressed; only the banner prints.
printf "Removing bootkube environment from system..."
{
  sudo systemctl stop kubelet.service
  sudo docker rm bootkube-render
  # Stop, then force-remove, every container whose listing mentions k8s or
  # bootkube. (The original piped "docker stop" output back into another
  # "docker stop"/"docker rm", stopping containers twice and duplicating a
  # line; one stop pass plus one rm pass per pattern is sufficient.
  # xargs -r skips the command entirely when no containers match.)
  sudo docker ps -a | grep k8s | cut -c1-20 | xargs -r sudo docker stop
  sudo docker ps -a | grep k8s | cut -c1-20 | xargs -r sudo docker rm -f
  sudo docker ps -a | grep bootkube | cut -c1-20 | xargs -r sudo docker stop
  sudo docker ps -a | grep bootkube | cut -c1-20 | xargs -r sudo docker rm -f
  # On-disk state from the previous cluster run:
  sudo rm -rf /etc/kubernetes/
  sudo rm -rf /var/etcd
  sudo rm -rf /var/run/calico
  sudo rm -rf /var/run/flannel
  sudo rm -rf /var/run/kubernetes/*
  sudo rm -rf /var/lib/kubelet/*
  sudo rm -rf /var/run/lock/kubelet.lock
  sudo rm -rf /var/run/lock/api-server.lock
  sudo rm -rf /var/run/lock/etcd.lock
  sudo rm -rf /var/run/lock/pod-checkpointer.lock
  # Binaries and assets installed by bootkube-up.sh:
  sudo rm -rf /usr/local/bin/bootkube
  sudo rm -rf /usr/local/bin/kubectl
  sudo rm -rf /usr/local/bin/helm
  sudo rm -rf /opt/cni
  sudo rm -rf "/home/$USER/.bootkube"
  # Bring the flannel VXLAN device down if present.
  sudo ip link set flannel.1 down
} &> /dev/null
printf "\nCOMPLETE!\n"
# Done
#!/bin/bash
# bootkube-up.sh: bootstrap a self-hosted Kubernetes cluster with bootkube.
# BootKube Deployment (FINAL):
## NEW INSTALLATIONS:
sudo apt-get update && sudo apt-get upgrade -y && sudo apt-get install -y docker.io vim ethtool traceroute git build-essential lldpd socat
### PREPARE THE ENVIRONMENT:
export CNI_VERSION=v0.5.2 ### CNI VERSION ###
export HELM_VERSION=v2.3.1 ### HELM VERSION ###
export BOOTKUBE_VERSION=v0.4.1 ### BOOTKUBE VERSION ###
export KUBERNETES_VERSION=v1.6.2 ### KUBERNETES VERSION ###
export KUBE_HW='ens3' ### MODIFY FOR YOUR ENVIRONMENT ###
export NSERVER01='10.3.0.10' ### DO NOT MODIFY FOR CEPH PV ###
export NSERVER02='192.168.1.70' ### MODIFY FOR YOUR ENVIRONMENT ###
export NSERVER03='8.8.8.8' ### MODIFY FOR YOUR ENVIRONMENT ###
export NSEARCH01='svc.cluster.local' ### MODIFY FOR YOUR ENVIRONMENT ###
export NSEARCH02='jinkit.com' ### MODIFY FOR YOUR ENVIRONMENT ###
export KUBE_IMAGE='v1k0d3n/hyperkube-amd64' ### MODIFY FOR YOUR ENVIRONMENT ###
# Derive this node's IPv4 address from the chosen interface (strip the CIDR
# suffix). $KUBE_HW is quoted so an empty/odd value cannot word-split.
export KUBE_IP=$(ip a s dev "$KUBE_HW" | awk '/inet /{gsub("/.*", "");print $2}')
echo "Kubernetes Endpoint: $KUBE_IP"
### PREPARE: /etc/resolv.conf
# Write the node's resolver config from the NSERVER/NSEARCH variables above
# (double-quoted command string, so the heredoc expands in this shell).
sudo -E bash -c "cat <<EOF > /etc/resolv.conf
nameserver $NSERVER01
nameserver $NSERVER02
nameserver $NSERVER03
search $NSEARCH01 $NSEARCH02
EOF"
### PREPARE: /etc/hosts:
# Map this host's IP to its short and FQDN names plus the "kubernetes" API
# alias. (The original interleaved single quotes around each variable inside
# a sudo bash -c string; expanding once here and appending via tee is
# equivalent and far less fragile.)
echo "$KUBE_IP $HOSTNAME $HOSTNAME.$NSEARCH02 kubernetes" | sudo tee -a /etc/hosts > /dev/null
### PREPARE: /etc/systemd/system/kubelet.service
# Render the kubelet systemd unit. NB: the outer single quotes keep the
# heredoc literal; each '$VAR' sequence deliberately breaks out of the
# quoting so that value is expanded by *this* shell before the unit is
# written. Each "\\" becomes a single backslash line-continuation in the
# generated unit file. Do not add comments inside the heredoc — they would
# be written into the unit.
sudo -E bash -c 'cat <<EOF > /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://kubernetes.io/docs/admin/kubelet/
[Service]
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStart=/usr/local/bin/kubelet \\
--kubeconfig=/etc/kubernetes/kubeconfig \\
--require-kubeconfig \\
--cni-conf-dir=/etc/cni/net.d \\
--cni-bin-dir=/opt/cni/bin \\
--network-plugin=cni \\
--lock-file=/var/run/lock/kubelet.lock \\
--exit-on-lock-contention \\
--pod-manifest-path=/etc/kubernetes/manifests \\
--allow-privileged \\
--cluster_dns='$NSERVER02','$NSERVER03','$NSERVER01' \\
--cluster_domain=cluster.local \\
--node-labels= \\
--hostname-override='$KUBE_IP' \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF'
### DOWNLOAD: bootkube
wget https://github.com/kubernetes-incubator/bootkube/releases/download/$BOOTKUBE_VERSION/bootkube.tar.gz
tar zxvf bootkube.tar.gz
sudo chmod +x bin/linux/bootkube
sudo cp bin/linux/bootkube /usr/local/bin/
### DOWNLOAD: kubectl
# Fetched over HTTPS — the original used plain http for a root-installed
# binary, which allows trivial on-path tampering.
wget https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubectl
sudo chmod +x kubectl
sudo mv kubectl /usr/local/bin/
### DOWNLOAD: cni
wget https://github.com/containernetworking/cni/releases/download/$CNI_VERSION/cni-amd64-$CNI_VERSION.tgz
sudo mkdir -p /opt/cni/bin
sudo tar -xf cni-amd64-$CNI_VERSION.tgz -C /opt/cni/bin/
### DOWNLOAD: kubelet
wget https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubelet
sudo mv kubelet /usr/local/bin/kubelet
# chmod runs under sudo: if the mv above crossed filesystems the destination
# file is root-owned and an unprivileged chmod (as in the original) fails.
sudo chmod +x /usr/local/bin/kubelet
### DOWNLOAD: helm
wget -O /tmp/helm-$HELM_VERSION-linux-amd64.tar.gz https://storage.googleapis.com/kubernetes-helm/helm-$HELM_VERSION-linux-amd64.tar.gz
tar zxvf /tmp/helm-$HELM_VERSION-linux-amd64.tar.gz -C /tmp/
chmod +x /tmp/linux-amd64/helm
sudo mv /tmp/linux-amd64/helm /usr/local/bin/
sudo rm -rf /tmp/linux-amd64
### CLEANUP:
sudo rm -rf /home/$USER/cni-amd64-$CNI_VERSION.tgz
sudo rm -rf /home/$USER/bootkube.tar.gz
sudo rm -rf /home/$USER/bin
### RENDER ASSETS:
# Render the bootstrap manifests with the bootkube container (self-hosted
# etcd, API served via the "kubernetes" /etc/hosts alias).
# NOTE(review): the mount and asset paths hardcode /home/ubuntu while the
# surrounding script uses /home/$USER — these only agree when run as the
# "ubuntu" user; confirm on this host.
sudo /usr/bin/docker run -v /home/ubuntu:/home/ubuntu quay.io/coreos/bootkube:$BOOTKUBE_VERSION /bootkube render --asset-dir=/home/ubuntu/.bootkube --experimental-self-hosted-etcd --etcd-servers=http://10.3.0.15:12379 --api-servers=https://kubernetes:443
# Drop the rendered flannel manifests; canal is applied later instead.
sudo rm -rf /home/ubuntu/.bootkube/manifests/kube-flannel*
### REQUIRED FOR CEPH/OPTIONAL ALL OTHERS:
# (Kept for reference) this single-quoted variant never expanded its
# variables, which is why the line below replaced it:
# sudo grep -rl "quay.io/coreos/hyperkube:$KUBERNETES_VERSION_coreos.0" /home/$USER/.bootkube/ | sudo xargs sed -i 's|quay.io/coreos/hyperkube:$KUBERNETES_VERSION_coreos.0|$KUBE_IMAGE:$KUBERNETES_VERSION|g'
# Rewrite every rendered manifest to pull the custom hyperkube image.
sudo grep -rl quay.io/coreos/hyperkube:$KUBERNETES_VERSION'_coreos.0' /home/$USER/.bootkube/ | sudo xargs sed -i "s|quay.io/coreos/hyperkube:"$KUBERNETES_VERSION"_coreos.0|quay.io/"$KUBE_IMAGE":"$KUBERNETES_VERSION"|g"
### DEPLOY KUBERNETES SELF-HOSTED CLUSTER:
sudo systemctl daemon-reload
sudo systemctl restart kubelet.service
# NOTE(review): several paths below hardcode /home/ubuntu while neighbouring
# lines use /home/$USER — they only agree when run as the "ubuntu" user.
sudo cp /home/ubuntu/.bootkube/auth/kubeconfig /etc/kubernetes/
sudo cp -a /home/$USER/.bootkube/* /etc/kubernetes/
sudo mkdir -p /home/$USER/.kube
sudo cp /etc/kubernetes/kubeconfig /home/$USER/.kube/config
sudo chmod 644 /home/ubuntu/.kube/config
# DEBUG #sudo touch /home/ubuntu/.bootkube/bootkube-up.log
# Launch bootkube detached; it bootstraps the control plane and then exits.
nohup sudo bash -c 'bootkube start --asset-dir=/home/ubuntu/.bootkube &>/dev/null &'
### WAIT FOR KUBERNETES ENVIRONMENT TO COME UP:
# BUG FIX: ${GREEN} was referenced but never defined anywhere, so echo_green
# printed plain text. Define it (empty when the terminal lacks colour).
GREEN=$(tput setaf 2 2>/dev/null)
function echo_green {
  echo -e "${GREEN}$1"; tput sgr0
}
echo -e -n "Waiting for master components to start..."
while true; do
  running_count=$(sudo kubectl get pods -n kube-system --no-headers 2>/dev/null | grep "Running" | wc -l)
  ### Expect 4 bootstrap components for a truly "Ready" state: etcd, apiserver, controller, and scheduler:
  if [ "$running_count" -ge 4 ]; then
    break
  fi
  echo -n "."
  sleep 1
done
echo_green "SUCCESS"
echo_green "Cluster created!"
echo ""
sudo kubectl cluster-info
sleep 10
### WAIT FOR KUBERNETES API TO COME UP CLEANLY, THEN APPLY FOLLOWING LABELS AND MANIFESTS:
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig label node --all node-role.kubernetes.io/canal-node=true
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig label node --all node-role.kubernetes.io/master="" --overwrite
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f https://gist.githubusercontent.com/v1k0d3n/0e6ff14cb913c93f0f3ec5c4cceb4915/raw/de8b1d9d3c4018179852ffad28e0c66c22ff724b/canal-etcd.yaml
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f https://gist.githubusercontent.com/v1k0d3n/39c496287bdcd4a5a6aa439050fb7332/raw/cfb20fef7c7404df5dd8a6fe958a1f870ea7b423/canal.yaml
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig apply -f https://gist.githubusercontent.com/v1k0d3n/4b58246e88b404fad5b17da95876e61b/raw/b2b205d56de2afad5e9ba768f7d0826c4c13599c/calico-cfg.yaml
printf "\nCOMPLETE!\n"
# Tear down every OpenStack-Helm release, leaf charts first, so dependent
# services are gone before the infrastructure (db/mq), bootstrap and ceph
# charts are purged. Order matches the original line-by-line list.
for release in \
  magnum mistral senlin barbican horizon neutron nova cinder heat \
  glance keystone memcached rabbitmq-etcd rabbitmq mariadb \
  bootstrap-openstack bootstrap-ceph ceph; do
  helm delete --purge "$release"
done
### PREPARE THE ENVIRONMENT:
export OSH_BRANCH='4f1aecb9c4514895a19804ba46d77e8903060f40' ### GIT COMMIT HASH OR BRANCH NAME ###
export SIGIL_VERSION='0.4.0' ### SIGIL VERSION ###
export KUBE_POD_CIDR='10.25.0.0/16' ### SDN POD CIDR RANGE ###
### APPLY DEVELOPMENT-ONLY RBAC POLICIES:
sudo kubectl --kubeconfig=/etc/kubernetes/kubeconfig create -f bootkube-ci/deploy-rbac/dev.yaml --validate=false
### ADD PACKAGE REQUIREMENTS FOR CEPH:
sudo apt-get install -y python-minimal ceph-common
### CLONE AND INSTALL OPENSTACK-HELM:
# BUG FIX: the original chained with "$$" (which expands to the shell's PID)
# instead of "&&", so "git checkout $OSH_BRANCH" never executed.
git clone https://github.com/openstack/openstack-helm.git /home/$USER/openstack-helm && cd /home/$USER/openstack-helm && git checkout $OSH_BRANCH
# BUG FIX: "sigil_$SIGIL_VERSION_Linux_x86_64" parsed the whole of
# "SIGIL_VERSION_Linux_x86_64" as one (unset) variable; braces are required
# to delimit the version in the tarball name.
curl -L https://github.com/gliderlabs/sigil/releases/download/v$SIGIL_VERSION/sigil_${SIGIL_VERSION}_Linux_x86_64.tgz | sudo tar -zxC /usr/local/bin
# Label every node for the OpenStack control-plane, storage and compute roles.
kubectl label nodes openstack-control-plane=enabled --all
kubectl label nodes ceph-storage=enabled --all
kubectl label nodes openvswitch=enabled --all
kubectl label nodes openstack-compute-node=enabled --all
# Local chart repo: serve in the background, use it instead of "stable".
helm init
helm serve &
helm repo remove stable
helm repo add local http://localhost:8879/charts
sudo mkdir -p /var/lib/openstack-helm/ceph
sudo mkdir -p /var/lib/nova/instances
export osd_cluster_network=$KUBE_POD_CIDR
export osd_public_network=$KUBE_POD_CIDR
# Generate ceph secrets keyed on a fresh fsid ($() instead of backticks).
cd helm-toolkit/utils/secret-generator
./generate_secrets.sh all "$(./generate_secrets.sh fsid)"
cd ../../..
# Build all charts into the local repo, then install in dependency order:
# ceph + bootstrap first, shared infrastructure (db/mq/cache) next, then the
# OpenStack services.
make
helm install --name=ceph local/ceph --namespace=ceph
helm install --name=bootstrap-ceph local/bootstrap --namespace=ceph
helm install --name=bootstrap-openstack local/bootstrap --namespace=openstack
helm install --name=mariadb local/mariadb --namespace=openstack
helm install --name=rabbitmq local/rabbitmq --namespace=openstack
helm install --name=rabbitmq-etcd local/etcd --namespace=openstack
helm install --name=memcached local/memcached --namespace=openstack
helm install --name=keystone local/keystone --namespace=openstack
helm install --name=glance local/glance --namespace=openstack
helm install --name=heat local/heat --namespace=openstack
helm install --name=cinder local/cinder --namespace=openstack
helm install --name=nova local/nova --namespace=openstack
helm install --name=neutron local/neutron --namespace=openstack
helm install --name=horizon local/horizon --namespace=openstack
helm install --name=barbican local/barbican --namespace=openstack
# BUG FIX: the original repeated the chart path ("local/senlin local/senlin").
helm install --name=senlin local/senlin --namespace=openstack
helm install --name=mistral local/mistral --namespace=openstack
helm install --name=magnum local/magnum --namespace=openstack
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment