Skip to content

Instantly share code, notes, and snippets.

@gangliao
Last active July 3, 2018 10:59
Show Gist options
  • Save gangliao/7cc4695c16e249db10bc920840537f6a to your computer and use it in GitHub Desktop.
#!/bin/bash
# Kubernetes worker-node bootstrap: installs certs, flannel, kubelet and
# kube-proxy on this machine.  Must run under bash, not plain sh: the
# script relies on the bashism `source` further down.
# -x traces every command; -e aborts on the first failing command.
set -xe
# 0. clean: wipe any state left over from a previous installation attempt.
sudo rm -rf /etc/kubernetes/
sudo rm -rf ~/node_bin
# Corporate HTTP(S) proxy for the wget downloads below; both lower- and
# upper-case variants are set because different tools read different ones.
# They are unset again before the first kubectl call against the cluster.
export http_proxy=http://10.130.14.129:8080
export https_proxy=http://10.130.14.129:8080
export HTTP_PROXY=http://10.130.14.129:8080
export HTTPS_PROXY=http://10.130.14.129:8080
##
# 1. Copy Certificates
#
# Download a pre-built bundle (TLS certs, kubeconfig files, binaries),
# unpack it, and distribute its contents to their install locations.
#
sudo mkdir -p ~/node_bin
sudo wget -P ~/node_bin/ http://eva.sogou-inc.com/cache/k8s/install_node_bin/node_bins.zip
cd ~/node_bin/ && sudo tar -xvf node_bins.zip
sudo mkdir -p /etc/kubernetes/ssl
sudo mv ssl/* /etc/kubernetes/ssl/
sudo mkdir -p ~/.kube
sudo mkdir -p /root/.kube
# Copy (not move) so the same kubeconfig also survives for the root user.
sudo cp kube/* ~/.kube/
sudo mv kube/* /root/.kube/
sudo mv bin/* /usr/local/bin/
# Leave the extraction directory before deleting it.  The archive lives
# inside ~/node_bin (it was fetched with `wget -P ~/node_bin/`), so
# removing that directory also removes node_bins.zip — the old
# `rm -rf ~/node_bins.zip` path never existed.
cd ~
sudo rm -rf ~/node_bin
##
# 2. Init Environment
#
# Generate /usr/local/bin/environment.sh with the cluster-wide settings
# and load it into the current shell.
##
# Discover this node's primary IPv4 address.  Assumes the uplink NIC is
# eth0 — TODO confirm on hosts with different interface naming.
node_ip=$(ip -4 addr show scope global dev eth0 | grep inet | awk '{print $2}' | cut -d / -f 1)
# The heredoc delimiter is deliberately unquoted so ${node_ip} expands
# now.  Every variable is exported (the original left the first three
# un-exported, inconsistent with the rest) so that scripts sourcing this
# file can pass the values on to child processes.
cat > environment.sh <<EOF
# TLS Bootstrapping 使用的 Token,可以使用命令 head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 生成
export BOOTSTRAP_TOKEN="41f7e4ba8b7be874fcff18bf5cf41a7c"
# 最好使用 主机未用的网段 来定义服务网段和 Pod 网段
# 服务网段 (Service CIDR),部署前路由不可达,部署后集群内使用IP:Port可达
export SERVICE_CIDR="10.254.0.0/16"
# POD 网段 (Cluster CIDR),部署前路由不可达,**部署后**路由可达(flanneld保证)
export CLUSTER_CIDR="172.30.0.0/16"
# 服务端口范围 (NodePort Range)
export NODE_PORT_RANGE="8400-9000"
# etcd 集群服务地址列表
export ETCD_ENDPOINTS="https://10.142.104.73:2379,https://10.141.186.118:2379,https://10.141.186.119:2379"
# flanneld 网络配置前缀
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes 服务 IP (一般是 SERVICE_CIDR 中第一个IP)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# 集群 DNS 服务 IP (从 SERVICE_CIDR 中预分配)
export CLUSTER_DNS_SVC_IP="10.254.0.2"
# 集群 DNS 域名
export CLUSTER_DNS_DOMAIN="cluster.local."
# 当前部署的机器名称(随便定义,只要能区分不同机器即可)
export NODE_NAME=k8s-node-${node_ip}
# 当前部署的机器 IP
export NODE_IP=${node_ip}
# etcd 集群所有机器 IP
export NODE_IPS="10.142.104.73 10.141.186.119 10.141.186.118"
# etcd 集群间通信的IP和端口
export ETCD_NODES=etcd-host0=https://10.142.104.73:2380,etcd-host1=https://10.141.186.119:2380,etcd-host2=https://10.141.186.118:2380
# 替换为 kubernetes maste 集群任一机器 IP
export MASTER_IP=10.142.104.73
export KUBE_APISERVER="https://10.142.104.73:6443"
EOF
sudo mv environment.sh /usr/local/bin/environment.sh
# `source` (bashism) loads the settings into this shell for the heredocs
# rendered later in the script.
source /usr/local/bin/environment.sh
# Install the three CloudFlare PKI tools (cfssl, cfssljson,
# cfssl-certinfo) from the internal mirror.  Each release artifact is
# named <tool>_linux-amd64 and is installed as /usr/local/bin/<tool>.
for tool in cfssl cfssljson cfssl-certinfo; do
  wget "http://eva.sogou-inc.com/cache/k8s/cfssl/${tool}_linux-amd64"
  sudo chmod +x "${tool}_linux-amd64"
  sudo mv "${tool}_linux-amd64" "/usr/local/bin/${tool}"
done
#cat >> ~/.bashrc << EOF
#export PATH=/usr/local/bin:$PATH
#EOF
##
# 3. Setup Flannel
#
##
# Generate a TLS client certificate for flanneld so it can reach the etcd
# cluster over HTTPS.  The CSR requests a 2048-bit RSA key; the subject
# fields must match what the cluster CA profile expects.
cat > flanneld-csr.json <<EOF
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Sign the CSR with the cluster CA; cfssljson writes flanneld.pem,
# flanneld-key.pem and flanneld.csr into the current directory.
sudo /usr/local/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | sudo /usr/local/bin/cfssljson -bare flanneld
sudo mkdir -p /etc/flanneld/ssl
sudo mv flanneld*.pem /etc/flanneld/ssl
# The raw CSR and request JSON are no longer needed once the certs exist.
sudo rm flanneld.csr flanneld-csr.json
# Render the flanneld systemd unit.  The heredoc delimiter is unquoted on
# purpose: ${ETCD_ENDPOINTS} and ${FLANNEL_ETCD_PREFIX} expand NOW from
# the sourced environment.sh, and each doubled backslash becomes a single
# line-continuation backslash in the generated unit file.
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \\
-iface=eth0 \\
-etcd-cafile=/etc/kubernetes/ssl/ca.pem \\
-etcd-certfile=/etc/flanneld/ssl/flanneld.pem \\
-etcd-keyfile=/etc/flanneld/ssl/flanneld-key.pem \\
-etcd-endpoints=${ETCD_ENDPOINTS} \\
-etcd-prefix=${FLANNEL_ETCD_PREFIX}
ExecStartPost=/usr/local/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
sudo cp flanneld.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable flanneld
sudo systemctl start flanneld
# Under `set -e`, a non-running unit makes `systemctl status` exit
# non-zero and abort the whole script — this doubles as a health check.
sudo systemctl status flanneld
ifconfig flannel.1
##
# 4. Setup K8s Node
#
##
# Build the TLS-bootstrap kubeconfig the kubelet uses for its very first
# connection to the API server (cluster CA plus shared bootstrap token).
kubectl_bin=/usr/local/bin/kubectl
bootstrap_cfg=bootstrap.kubeconfig
# Cluster parameters: API server endpoint with the CA bundle embedded.
sudo "${kubectl_bin}" config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server="${KUBE_APISERVER}" \
  --kubeconfig="${bootstrap_cfg}"
# Client credentials: token auth for the kubelet-bootstrap user.
sudo "${kubectl_bin}" config set-credentials kubelet-bootstrap \
  --token="${BOOTSTRAP_TOKEN}" \
  --kubeconfig="${bootstrap_cfg}"
# Context tying the cluster and the user together.
sudo "${kubectl_bin}" config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig="${bootstrap_cfg}"
# Select that context as the default.
sudo "${kubectl_bin}" config use-context default --kubeconfig="${bootstrap_cfg}"
sudo mv "${bootstrap_cfg}" /etc/kubernetes/
sudo mkdir -p /var/lib/kubelet
# Render the kubelet systemd unit.  Unquoted EOF: ${NODE_IP},
# ${CLUSTER_DNS_SVC_IP} and ${CLUSTER_DNS_DOMAIN} expand now from the
# sourced environment.sh; doubled backslashes become single
# line-continuation backslashes in the generated file.
cat > kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \\
--address=${NODE_IP} \\
--hostname-override=${NODE_IP} \\
--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest \\
--experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--require-kubeconfig \\
--cert-dir=/etc/kubernetes/ssl \\
--cluster-dns=${CLUSTER_DNS_SVC_IP} \\
--cluster-domain=${CLUSTER_DNS_DOMAIN} \\
--hairpin-mode promiscuous-bridge \\
--allow-privileged=true \\
--serialize-image-pulls=false \\
--feature-gates=DevicePlugins=true \\
--logtostderr=true \\
--v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
sudo mv kubelet.service /etc/systemd/system/kubelet.service
sudo systemctl daemon-reload
sudo systemctl enable kubelet
sudo systemctl start kubelet
# Health check: a failed kubelet makes `systemctl status` exit non-zero,
# aborting the script under `set -e`.
sudo systemctl status kubelet
# Drop the proxy settings before talking to the in-cluster API server.
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
# Approve this node's pending TLS-bootstrap CSR(s).  The pipeline is safe
# under `set -e`: awk is the final stage and exits 0 even without matches
# (pipefail is not set).  $kube_node is deliberately left unquoted so
# several pending CSR names expand to separate arguments; `|| true`
# tolerates the empty-list case, where `certificate approve` errors out.
kube_node=$(sudo /usr/local/bin/kubectl get csr | grep node-csr | awk '$4=="Pending" {print $1}')
sudo /usr/local/bin/kubectl certificate approve $kube_node || true
# Generate the kube-proxy client certificate.  CN "system:kube-proxy"
# maps to the built-in RBAC identity the API server expects for proxies.
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
# Sign with the cluster CA; cfssljson writes kube-proxy.pem,
# kube-proxy-key.pem and kube-proxy.csr into the current directory.
sudo /usr/local/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | sudo /usr/local/bin/cfssljson -bare kube-proxy
sudo mv kube-proxy*.pem /etc/kubernetes/ssl/
# The raw CSR and request JSON are no longer needed once the certs exist.
sudo rm kube-proxy.csr kube-proxy-csr.json
# Build the kube-proxy kubeconfig: client-certificate auth against the
# same cluster, then select it as the default context.
kubectl_cmd=/usr/local/bin/kubectl
proxy_cfg=kube-proxy.kubeconfig
# Cluster parameters: API server endpoint with the CA bundle embedded.
sudo "${kubectl_cmd}" config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server="${KUBE_APISERVER}" \
  --kubeconfig="${proxy_cfg}"
# Client credentials: the kube-proxy cert/key pair, embedded inline.
sudo "${kubectl_cmd}" config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig="${proxy_cfg}"
# Context tying the cluster and the user together.
sudo "${kubectl_cmd}" config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig="${proxy_cfg}"
# Select that context as the default.
sudo "${kubectl_cmd}" config use-context default --kubeconfig="${proxy_cfg}"
sudo mv "${proxy_cfg}" /etc/kubernetes
sudo mkdir -p /var/lib/kube-proxy
# Render the kube-proxy systemd unit.  Unquoted EOF: ${NODE_IP} and
# ${CLUSTER_CIDR} expand now from the sourced environment.sh; doubled
# backslashes become single line-continuation backslashes in the file.
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \\
--bind-address=${NODE_IP} \\
--hostname-override=${NODE_IP} \\
--cluster-cidr=${CLUSTER_CIDR} \\
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
--logtostderr=true \\
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
sudo mv kube-proxy.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable kube-proxy
sudo systemctl start kube-proxy
# Health check: exits non-zero (aborting under set -e) if the unit failed.
sudo systemctl status kube-proxy
# Final sanity check: the freshly bootstrapped node should now be listed.
sudo /usr/local/bin/kubectl get nodes
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment