@amon-ra
Created July 18, 2016 11:53
Single-node Kubernetes install with network policy.
#!/bin/bash
#/etc/modules
# af_packet xt_set ip6_tables ipip overlay xt_REDIRECT xt_nat xt_comment nft_reject_ipv4 nft_reject nf_tables xt_conntrack ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 xt_addrtype iptable_filter br_netfilter nf_nat bridge stp
#/etc/conf.d/docker
# DOCKER_OPTS="-g /mnt/docker --insecure-registry=10.0.0.0/8 --iptables=false --bridge=none --ip-masq=false --userland-proxy=false --mtu=9000"
#/etc/init.d/docker
# mkdir -p /var/lib/kubelet
# mount -o bind /var/lib/kubelet /var/lib/kubelet
# mount --make-shared /var/lib/kubelet
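# Optional sanity check: the bind mount must be shared so mounts made inside
# the kubemaster container propagate back to the host; findmnt should report
# "shared" for the target:
# findmnt -o TARGET,PROPAGATION /var/lib/kubelet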
#gcr.io/google_containers/hyperkube-${ARCH}
#versions
#curl -k -s -X GET https://gcr.io/v2/google_containers/hyperkube/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/etcd/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/skydns/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/exechealthz/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/leader-elector/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/kube2sky/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/pause/tags/list
#curl -k -s -X GET https://gcr.io/v2/google-containers/kube-addon-manager-amd64/tags/list
#curl -k -s -X GET https://gcr.io/v2/google_containers/nginx-ingress-controller/tags/list
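# To print just the tag names from any of these endpoints (a sketch, assumes
# jq is installed on the host):
# curl -k -s https://gcr.io/v2/google_containers/hyperkube/tags/list | jq -r '.tags[]' | sort -V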
export LOGLEVEL=5
export POL_LOGLEVEL="debug"
export INFRA_IMAGE="gcr.io/google_containers/pause:3.0"
# KUBE2SKY_VERSION=1.15
# SKYDNS_VERSION=2015-10-13-8c72f8c
export EXECHEALTHZ_VERSION=1.0
export ETCD_VERSION=2.2.5
export ELECTOR_VERSION=0.4
export CALICO_VERSION=v0.20.0
export CALICO_POLAGENT_VERSION=v0.1.4
export K8S_POLICY_VERSION=v0.1.4
export KUBEDNS_VERSION=1.3
export DNSMASQ_VERSION=1.1
export DASHBOARD_VERSION=v1.0.1
export HEAPSTER_VERSION=v1.0.2
export INFLUXDB_VERSION=v0.5
export GRAFANA_VERSION=v2.6.0-2
export ADDONRESIZER_VERSION=1.0
export ADDONMANAGER_VERSION=v2
export NGINX_VERSION=0.7
export KUBELEGO_VERSION=0.0.2
export FLUENTD_VERSION=0.1
#K8S_VERSION=$(curl -sS https://storage.googleapis.com/kubernetes-release/release/stable.txt)
export K8S_VERSION=v1.2.4
export ARCH=amd64
# HYPERKUBE_IMAGE="quay.io/coreos/hyperkube:${K8S_VERSION}_coreos.cni.1"
#export HYPERKUBE_IMAGE="gcr.io/google_containers/hyperkube:${K8S_VERSION}"
export HYPERKUBE_IMAGE="oondeo/hyperkube"
export ADVERTISE_IP=172.20.0.2
export POD_NETWORK=10.128.0.0/16
export SERVICE_IP_RANGE=10.0.0.1/16
export K8S_SERVICE_IP=10.0.0.1
export DNS_SERVICE_IP=10.0.0.10
export DNS_DOMAIN=cluster.local
export MASTER_HOST=kube-dev.$DNS_DOMAIN
export DOCKER_DIR="/mnt/docker"
export DNS_REPLICAS=1
export METRICS_MEMORY="200Mi"
export ETCD_ENDPOINTS="http://127.0.0.1:4001"
export ETCD_AUTHORITY="127.0.0.1:4001"
#ETCD_SERVER="http://127.0.0.1:2379"
export OTHER_DOMAINS=",DNS:prod.oondeo.es,DNS:test.oondeo.es"
export SSL_NAMES="IP:$ADVERTISE_IP,IP:$K8S_SERVICE_IP,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${MASTER_HOST},DNS:${DNS_DOMAIN}${OTHER_DOMAINS}"
if [ "$1" == "clean" ]
then
kubectl delete deployment --all
kubectl delete pods --all
kubectl delete pv --all
docker rm -f kubemaster
docker rm -f $(docker ps -a -q)
umount /var/lib/kubelet
rm -rf /etc/kubernetes/addons /etc/kubernetes/manifests /etc/kubernetes/cni/net.d /var/run/calico /var/log/containers /var/log/calico /var/lib/etcd /var/lib/kubelet
docker rm -f $(docker ps -a -q)
exit 0
fi
#modprobe -a overlay xt_set ip6_tables
if [ ! -f /root/kubectl ];then
curl -L http://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl -o /root/kubectl
chmod +x /root/kubectl
fi
if [ ! -f /root/calicoctl ];then
# calicoctl binary from the calico-containers release (URL assumed from the release layout)
curl -L https://github.com/projectcalico/calico-containers/releases/download/${CALICO_VERSION}/calicoctl -o /root/calicoctl
chmod +x /root/calicoctl
fi
if [ ! -f /root/policy ];then
curl -L https://github.com/projectcalico/k8s-policy/releases/download/${K8S_POLICY_VERSION}/policy -o /root/policy
chmod +x /root/policy
fi
# for i in ${HYPERKUBE_IMAGE} oondeo/kubectl gcr.io/google_containers/kubedns-${ARCH}:${KUBEDNS_VERSION} gcr.io/google_containers/dnsmasq:${DNSMASQ_VERSION} gcr.io/google_containers/etcd-${ARCH}:${ETCD_VERSION} gcr.io/google_containers/leader-elector:${ELECTOR_VERSION} gcr.io/google_containers/exechealthz:${EXECHEALTHZ_VERSION} calico/k8s-policy-agent:${CALICO_POLAGENT_VERSION} calico/ctl:${CALICO_VERSION} calico/node:${CALICO_VERSION}
# do
# echo "downloading $i"
# docker pull $i
# if [ $? != "0" ]
# then
# echo "Error downloading: $i"
# exit 1
# fi
# done
mkdir -p /etc/kubernetes/ssl /etc/kubernetes/manifests /etc/kubernetes/addons /etc/kubernetes/cni/net.d /var/run/calico /var/log/containers /var/log/calico /var/lib/etcd /var/lib/kubelet
if [ ! -f /etc/kubernetes/server.cert ]
then
cd /etc/kubernetes
#server.key server.cert ca.crt
docker run -d --net=host -v /etc/localtime:/etc/localtime:ro -v ${PWD}:/data --name setupssl ${HYPERKUBE_IMAGE} /setup-files.sh "IP:$ADVERTISE_IP,IP:$K8S_SERVICE_IP,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${MASTER_HOST},DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${DNS_DOMAIN}${OTHER_DOMAINS}"
sleep 60
#mv *.csv /etc/kubernetes
#"IP:10.0.0.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
# openssl genrsa -out ca-key.pem 2048
# openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.crt -subj "/CN=kube-ca"
# openssl genrsa -out server.key 2048
# openssl req -new -sha256 -key server.key -subj "/CN=dev.oondeo.es" \
# -reqexts SAN -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=$SSL_NAMES")) \
# -out master.csr
# openssl x509 -req -in master.csr -CA ca.crt -CAkey ca-key.pem -CAcreateserial -out server.cert -days 730
# openssl genrsa -out admin-key.pem 2048
# openssl req -new -key admin-key.pem -out admin.csr -subj "/CN=kube-admin"
# openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca-key.pem -CAcreateserial -out admin.pem -days 730
fi
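# Optional check once setup-files.sh has finished: confirm the SANs landed on
# the generated server certificate.
# openssl x509 -in /etc/kubernetes/server.cert -noout -text | grep -A1 'Subject Alternative Name'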
# sleep 5
#-e ETCD_AUTHORITY=${ETCD_AUTHORITY} -e IP=${ADVERTISE_IP} -e CALICO_NETWORKING=true -e HOSTNAME="${ADVERTISE_IP}" \
# cat > /usr/local/bin/calicoctl <<CALICOCTL
# #!/bin/bash
#
# docker run -it --net=host --privileged --rm \
# -v /dev:/dev:rw -v /sys:/sys:rw \
# -v /etc/localtime:/etc/localtime:ro -v /var/run:/var/run:rw \
# -v /var/log/calico:/var/log/calico:rw -v /lib/modules:/lib/modules:ro \
# -e FELIX_ETCDENDPOINTS=${ETCD_ENDPOINTS} -e FELIX_FELIXHOSTNAME=${ADVERTISE_IP} \
# -e CALICO_NETWORKING=true -e IP=${ADVERTISE_IP} \
# -e ETCD_ENDPOINTS="${ETCD_ENDPOINTS}" -e HOSTNAME="${ADVERTISE_IP}" \
# calico/ctl:${CALICO_VERSION} \$*
#
# CALICOCTL
cat > /usr/local/bin/calicoctl <<CALICOCTL
#!/bin/bash
docker exec -ti kubemaster sh -c "cd '/rootfs\${PWD}';/usr/bin/calicoctl \$* "
CALICOCTL
cat > /usr/local/bin/policy <<CALICOCTL
#!/bin/bash
docker exec -ti kubemaster sh -c "cd '/rootfs\${PWD}';/usr/bin/policy \$* "
CALICOCTL
#export CONTEXT=$(kubectl config view | grep current-context | awk '{print $2}')
cat > /usr/local/bin/kubectl <<KUBECTL
#!/bin/bash
docker exec -ti kubemaster sh -c "cd '/rootfs\${PWD}';/usr/bin/kubectl \$* "
KUBECTL
chmod +x /usr/local/bin/calicoctl
chmod +x /usr/local/bin/policy
chmod +x /usr/local/bin/kubectl
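# The wrappers above just exec the binaries inside the kubemaster container
# from the matching host directory, so once it is running they behave like
# local tools, e.g.:
# kubectl get nodes
# calicoctl status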
modprobe -a xt_set
modprobe -a ip6_tables
cat > /etc/kubernetes/manifests/etcd.yaml <<ETCDFILE
# docker run -d --net=host --name etcd-master --restart=always \
# -e ETCD_DATA_DIR="/var/lib/etcd/data" -e ETCD_NAME="${MASTER_HOST}" \
# -v /etc/localtime:/etc/localtime:ro -v /var/lib/etcd:/var/lib/etcd \
# gcr.io/google_containers/etcd-${ARCH}:${ETCD_VERSION} /usr/local/bin/etcd
#
# ETCD_NAME={{ ansible_hostname }}
# ETCD_DATA_DIR={{ etcd_data_dir }}
# #ETCD_SNAPSHOT_COUNTER="10000"
# #ETCD_HEARTBEAT_INTERVAL="100"
# #ETCD_ELECTION_TIMEOUT="1000"
# #ETCD_MAX_SNAPSHOTS="5"
# #ETCD_MAX_WALS="5"
# #ETCD_CORS=""
#
# {% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 0 %}
# #[cluster]
# ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
# ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
# ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
# ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
# #ETCD_DISCOVERY=""
# #ETCD_DISCOVERY_SRV=""
# #ETCD_DISCOVERY_FALLBACK="proxy"
# #ETCD_DISCOVERY_PROXY=""
# {% endif %}
#
# {% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 0 and networking == "contiv" %}
# ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_legacy_urls }}
# {% else %}
# ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
# {% endif %}
#
# {% if networking == "contiv" %}
# ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_legacy_urls }}
# ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_legacy_urls }}
# {% else %}
# ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
# ETCD_LISTEN_CLIENT_URLS="{{ etcd_listen_client_urls }}"
# {% endif %}
#
# #[proxy]
# {% if networking == "contiv" and inventory_hostname in groups['nodes'] %}
# ETCD_PROXY="on"
# {% else %}
# ETCD_PROXY="off"
# {% endif %}
#
# #[security]
# {% if etcd_url_scheme == 'https' -%}
# ETCD_CA_FILE={{ etcd_ca_file }}
# ETCD_CERT_FILE={{ etcd_cert_file }}
# ETCD_KEY_FILE={{ etcd_key_file }}
# {% endif -%}
# {% if etcd_peer_url_scheme == 'https' -%}
# ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
# ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
# ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
# {% endif -%}
---
#etcd
apiVersion: v1
kind: Pod
metadata:
name: k8s-etcd
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: k8s-etcd
image: gcr.io/google_containers/etcd:${ETCD_VERSION}
command:
- /usr/local/bin/etcd
- --listen-client-urls=${ETCD_ENDPOINTS}
- --advertise-client-urls=${ETCD_ENDPOINTS}
- --data-dir=/var/etcd/data
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: /var/etcd
name: etc-kubernetes-host
readOnly: false
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
- hostPath:
path: /etc/ssl/certs
name: ssl-certs-host
- hostPath:
path: /var/lib/etcd
name: etc-kubernetes-host
ETCDFILE
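# Once the kubelet picks up this manifest, etcd can be probed on its client
# port (a sketch, derived from ETCD_ENDPOINTS above):
# curl -s http://127.0.0.1:4001/version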
cat > /etc/kubernetes/manifests/master.yaml <<MASTERFILE
---
#master
apiVersion: v1
kind: Pod
metadata:
name: k8s-master
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: controller-manager
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- controller-manager
- --master=127.0.0.1:8080
- --service-account-private-key-file=/srv/kubernetes/server.key
- --root-ca-file=/srv/kubernetes/ca.crt
- --min-resync-period=3m
- --v=${LOGLEVEL}
volumeMounts:
- name: data
mountPath: /srv/kubernetes
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- name: apiserver
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- apiserver
- --bind-address=0.0.0.0
- --secure-port=8443
- --service-cluster-ip-range=${SERVICE_IP_RANGE}
- --insecure-bind-address=127.0.0.1
- --etcd-servers=${ETCD_ENDPOINTS}
- --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
- --client-ca-file=/srv/kubernetes/ca.crt
- --basic-auth-file=/srv/kubernetes/basic_auth.csv
- --min-request-timeout=300
- --tls-cert-file=/srv/kubernetes/server.cert
- --tls-private-key-file=/srv/kubernetes/server.key
- --token-auth-file=/srv/kubernetes/known_tokens.csv
- --runtime-config=extensions/v1beta1=true,extensions/v1beta1/thirdpartyresources=true
- --allow-privileged=true
- --v=${LOGLEVEL}
volumeMounts:
- name: data
mountPath: /srv/kubernetes
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- name: scheduler
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- scheduler
- --master=127.0.0.1:8080
- --v=${LOGLEVEL}
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
- hostPath:
path: /etc/kubernetes
name: data
- hostPath:
path: /etc/ssl/certs
name: ssl-certs-host
MASTERFILE
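# When the master pod is up, the insecure port answers locally; both checks
# below should succeed before the addons are applied:
# curl -s http://127.0.0.1:8080/healthz
# kubectl get componentstatuses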
cat > /etc/kubernetes/manifests/proxy.yaml <<PROXYFILE
---
#kube-proxy
#https://github.com/kubernetes/kubernetes/blob/master/cluster/images/hyperkube/addons/kube-proxy.yaml
#apiVersion: extensions/v1beta1
apiVersion: v1
kind: Pod
metadata:
name: k8s-proxy
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: k8s-proxy
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- proxy
- --master=http://127.0.0.1:8080
- --v=${LOGLEVEL}
- --proxy-mode=iptables
- --resource-container=""
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: /srv/kubernetes
name: etc-kubernetes-host
readOnly: true
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
- hostPath:
path: /etc/ssl/certs
name: ssl-certs-host
- hostPath:
path: /etc/kubernetes
name: etc-kubernetes-host
PROXYFILE
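# kube-proxy in iptables mode programs NAT rules on the host, so its effect
# can be inspected directly (chain name as used by kube-proxy):
# iptables -t nat -L KUBE-SERVICES -n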
cat > /etc/kubernetes/manifests/calico.yaml <<CALICOFILE
---
#calico policy agent
apiVersion: v1
kind: Pod
metadata:
name: calico-policy-agent
namespace: calico-system
spec:
hostNetwork: true
containers:
# The Calico policy agent.
- name: calico-policy-agent
image: calico/k8s-policy-agent:${CALICO_POLAGENT_VERSION}
imagePullPolicy: IfNotPresent
env:
- name: ETCD_ENDPOINTS
value: "${ETCD_ENDPOINTS}"
- name: K8S_API
value: "http://127.0.0.1:8080"
- name: LEADER_ELECTION
value: "true"
- name: LOG_LEVEL
value: "info"
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
# Leader election container used by the agent.
- name: leaderelect
image: gcr.io/google_containers/leader-elector:${ELECTOR_VERSION}
imagePullPolicy: IfNotPresent
args:
- --election=calico-policy-election
- --election-namespace=calico-system
- --http=127.0.0.1:4040
# ports:
# - containerPort: 4040
# protocol: TCP
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
CALICOFILE
# the addon manager is not working in version 1.2.4, so this pod definition is only written to /root and not started
cat > /root/addon-manager.json <<ADDONFILE
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "kube-addon-manager",
"namespace": "kube-system"
},
"spec": {
"hostNetwork": true,
"containers": [
{
"name": "kube-addon-manager",
"image": "gcr.io/google-containers/kube-addon-manager-amd64:${ADDONMANAGER_VERSION}",
"resources": {
"requests": {
"cpu": "5m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "addons",
"mountPath": "/etc/kubernetes",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "addons",
"hostPath": {
"path":"/etc/kubernetes"
}
}
]
}
}
ADDONFILE
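# Because the addon manager is not working on this version, the addon YAMLs
# written below can be applied by hand once the API server answers (a sketch):
# find /etc/kubernetes/addons -name '*.yaml' -exec kubectl create -f {} \;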
mkdir -p /etc/kubernetes/addons/dns
cat > /etc/kubernetes/addons/dns/dns-service.yaml <<DNSFILE
#skydns
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file should be kept in sync with https://github.com/kubernetes/kubernetes/blob/master/cluster/images/hyperkube/addons/dns-rc.yaml
# TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*
# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
spec:
clusterIP: ${DNS_SERVICE_IP}
selector:
k8s-app: kube-dns
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
DNSFILE
cat > /etc/kubernetes/addons/dns/dns-controller.yaml <<DNSFILE
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v14
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
version: v14
spec:
replicas: ${DNS_REPLICAS}
selector:
k8s-app: kube-dns
version: v14
template:
metadata:
labels:
k8s-app: kube-dns
version: v14
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-${ARCH}:${KUBEDNS_VERSION}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
timeoutSeconds: 5
args:
# command = "/kube-dns"
- --domain=${DNS_DOMAIN}
- --dns-port=10053
#- --kubecfg-file=/etc/kubernetes/master-kubeconfig.yaml
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
- name: kubernetes-etc
mountPath: /srv/kubernetes
readOnly: true
- name: dnsmasq
image: gcr.io/google_containers/dnsmasq:${DNSMASQ_VERSION}
args:
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
- name: healthz
image: gcr.io/google_containers/exechealthz-${ARCH}:${EXECHEALTHZ_VERSION}
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
args:
- -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
- -port=8080
ports:
- containerPort: 8080
protocol: TCP
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
dnsPolicy: Default # Don't use cluster DNS.
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
- hostPath:
path: /etc/kubernetes
name: kubernetes-etc
DNSFILE
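# Once the kube-dns pods are ready, resolution can be verified straight
# against the service IP (assumes nslookup is available on the host):
# nslookup kubernetes.default.svc.cluster.local 10.0.0.10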
mkdir -p /etc/kubernetes/addons/dashboard
cat > /etc/kubernetes/addons/dashboard/dashboard-controller.yaml <<DASHBOARDFILE
#dashboard
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file should be kept in sync with cluster/addons/dashboard/dashboard-controller.yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
version: ${DASHBOARD_VERSION}
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
app: kubernetes-dashboard
version: ${DASHBOARD_VERSION}
kubernetes.io/cluster-service: "true"
template:
metadata:
labels:
app: kubernetes-dashboard
version: ${DASHBOARD_VERSION}
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kubernetes-dashboard
# ARCH will be replaced with the architecture it's built for. Check out the Makefile for more details
image: gcr.io/google_containers/kubernetes-dashboard-${ARCH}:${DASHBOARD_VERSION}
imagePullPolicy: Always
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 9090
protocol: TCP
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
DASHBOARDFILE
cat > /etc/kubernetes/addons/dashboard/dashboard-service.yaml <<DASHBOARDFILE
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file should be kept in sync with cluster/addons/dashboard/dashboard-service.yaml
kind: Service
apiVersion: v1
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Dashboard"
spec:
type: NodePort
externalIPs:
- 172.20.0.2
ports:
- port: 3001
targetPort: 9090
selector:
app: kubernetes-dashboard
DASHBOARDFILE
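# With the external IP above, the dashboard should answer on the node itself:
# curl -s http://172.20.0.2:3001/ >/dev/null && echo dashboard up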
mkdir -p /etc/kubernetes/addons/influxdb/
cat > /etc/kubernetes/addons/influxdb/grafana-service.yaml <<MONCONFIG
apiVersion: v1
kind: Service
metadata:
name: monitoring-grafana
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Grafana"
spec:
# On production clusters, consider setting up auth for grafana, and
# exposing Grafana either using a LoadBalancer or a public IP.
# type: LoadBalancer
type: NodePort
externalIPs:
- 172.20.0.2
ports:
- port: 3000
targetPort: 3000
selector:
k8s-app: influxGrafana
MONCONFIG
cat > /etc/kubernetes/addons/influxdb/heapster-controller.yaml <<MONCONFIG
# {% set metrics_memory = "200Mi" -%}
# {% set eventer_memory = "200Mi" -%}
# {% set metrics_memory_per_node = 4 -%}
# {% set eventer_memory_per_node = 500 -%}
# {% set num_nodes = pillar.get('num_nodes', -1) -%}
# {% if num_nodes >= 0 -%}
# {% set metrics_memory = (200 + num_nodes * metrics_memory_per_node)|string + "Mi" -%}
# {% set eventer_memory = (200 * 1024 + num_nodes * eventer_memory_per_node)|string + "Ki" -%}
# {% endif -%}
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-${HEAPSTER_VERSION}
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
version: ${HEAPSTER_VERSION}
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: ${HEAPSTER_VERSION}
template:
metadata:
labels:
k8s-app: heapster
version: ${HEAPSTER_VERSION}
spec:
containers:
- image: gcr.io/google_containers/heapster:${HEAPSTER_VERSION}
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: ${METRICS_MEMORY}
requests:
cpu: 100m
memory: ${METRICS_MEMORY}
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --metric_resolution=60s
- image: gcr.io/google_containers/heapster:${HEAPSTER_VERSION}
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: ${METRICS_MEMORY}
requests:
cpu: 100m
memory: ${METRICS_MEMORY}
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:${ADDONRESIZER_VERSION}
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=${METRICS_MEMORY}
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-${HEAPSTER_VERSION}
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:${ADDONRESIZER_VERSION}
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=${METRICS_MEMORY}
- --extra-memory=500Ki
- --threshold=5
- --deployment=heapster-${HEAPSTER_VERSION}
- --container=eventer
- --poll-period=300000
MONCONFIG
cat > /etc/kubernetes/addons/influxdb/heapster-service.yaml <<MONCONFIG
kind: Service
apiVersion: v1
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Heapster"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster
MONCONFIG
cat > /etc/kubernetes/addons/influxdb/influxdb-grafana-controller.yaml <<MONCONFIG
apiVersion: v1
kind: ReplicationController
metadata:
name: monitoring-influxdb-grafana-v3
namespace: kube-system
labels:
k8s-app: influxGrafana
version: v3
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: influxGrafana
version: v3
template:
metadata:
labels:
k8s-app: influxGrafana
version: v3
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster_influxdb:${INFLUXDB_VERSION}
name: influxdb
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 500Mi
requests:
cpu: 100m
memory: 500Mi
ports:
- containerPort: 8083
- containerPort: 8086
volumeMounts:
- name: influxdb-persistent-storage
mountPath: /data
- image: gcr.io/google_containers/heapster_grafana:${GRAFANA_VERSION}
name: grafana
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
# This variable is required to setup templates in Grafana.
- name: INFLUXDB_SERVICE_URL
value: http://monitoring-influxdb:8086
# The following env variables are required to make Grafana accessible via
# the kubernetes api-server proxy. On production clusters, we recommend
# removing these env variables, setup auth for grafana, and expose the grafana
# service using a LoadBalancer or a public IP.
- name: GF_AUTH_BASIC_ENABLED
value: "false"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Admin
#- name: GF_SERVER_ROOT_URL
# value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
volumeMounts:
- name: grafana-persistent-storage
mountPath: /var
volumes:
- name: influxdb-persistent-storage
emptyDir: {}
- name: grafana-persistent-storage
emptyDir: {}
MONCONFIG
cat > /etc/kubernetes/addons/influxdb/influxdb-service.yaml <<MONCONFIG
apiVersion: v1
kind: Service
metadata:
name: monitoring-influxdb
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "InfluxDB"
spec:
ports:
- name: http
port: 8083
targetPort: 8083
- name: api
port: 8086
targetPort: 8086
selector:
k8s-app: influxGrafana
MONCONFIG
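# Grafana is published on the node's external IP (port 3000, see the grafana
# service above); a quick reachability check:
# curl -s http://172.20.0.2:3000/ >/dev/null && echo grafana up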
cat > /etc/kubernetes/manifests/kube-namespace.yaml <<KUBECONFIG
apiVersion: v1
kind: Namespace
metadata:
name: kube-system
KUBECONFIG
cat > /etc/kubernetes/manifests/calico-namespace.yaml <<KUBECONFIG
apiVersion: v1
kind: Namespace
metadata:
name: calico-system
KUBECONFIG
cat > /etc/kubernetes/manifests/network-policy.yaml <<KUBECONFIG
kind: ThirdPartyResource
apiVersion: extensions/v1beta1
metadata:
name: network-policy.net.alpha.kubernetes.io
description: "Specification for a network isolation policy"
versions:
- name: v1alpha1
KUBECONFIG
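# With this ThirdPartyResource registered and the calico policy agent running,
# isolation is switched on per namespace via the alpha annotation used by the
# k8s-policy project of this era (namespace name is just an example):
# kubectl annotate ns default "net.alpha.kubernetes.io/network-isolation=yes"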
mkdir -p /etc/kubernetes/addons/kube-lego
cat > /etc/kubernetes/addons/kube-lego/kube-lego-configmap.yaml <<KUBECONFIG
apiVersion: v1
metadata:
name: kube-lego
namespace: kube-system
data:
# modify this to specify your email address
lego.email: "info@oondeo.es"
# configure Let's Encrypt's production API
lego.url: "https://acme-v01.api.letsencrypt.org/directory"
kind: ConfigMap
KUBECONFIG
cat > /etc/kubernetes/addons/kube-lego/kube-lego-deployment.yaml <<KUBECONFIG
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-lego
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: kube-lego
spec:
containers:
- name: kube-lego
image: jetstack/kube-lego:${KUBELEGO_VERSION}
imagePullPolicy: Always
ports:
- containerPort: 8080
env:
- name: LEGO_EMAIL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.email
- name: LEGO_URL
valueFrom:
configMapKeyRef:
name: kube-lego
key: lego.url
- name: LEGO_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
KUBECONFIG
cat > /etc/kubernetes/addons/kube-lego/kube-lego-svc.yaml <<KUBECONFIG
apiVersion: v1
kind: Service
metadata:
name: kube-lego
namespace: kube-system
annotations:
ingress-nginx.kubernetes.io/ssl-redirect: "false"
spec:
ports:
- port: 8080
targetPort: 8080
selector:
app: kube-lego
KUBECONFIG
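# kube-lego acts on Ingress resources that opt in via annotation; a minimal
# hypothetical example (host, secret, and backend names are placeholders, not
# created by this script):
# apiVersion: extensions/v1beta1
# kind: Ingress
# metadata:
#   name: example
#   annotations:
#     kubernetes.io/tls-acme: "true"
#     kubernetes.io/ingress.class: "nginx"
# spec:
#   tls:
#   - hosts:
#     - www.example.com
#     secretName: example-tls
#   rules:
#   - host: www.example.com
#     http:
#       paths:
#       - path: /
#         backend:
#           serviceName: example-svc
#           servicePort: 80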
mkdir -p /etc/kubernetes/addons/nginx
cat > /etc/kubernetes/addons/nginx/nginx-configmap.yaml <<KUBECONFIG
apiVersion: v1
data:
proxy-connect-timeout: "15"
proxy-read-timeout: "600"
proxy-send-timeout: "600"
hsts-include-subdomains: "false"
body-size: "64m"
map-hash-bucket-size: "64"
kind: ConfigMap
metadata:
name: nginx
namespace: kube-system
KUBECONFIG
cat > /etc/kubernetes/addons/nginx/nginx-deployment.yaml <<KUBECONFIG
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:${NGINX_VERSION}
name: nginx
imagePullPolicy: Always
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
livenessProbe:
httpGet:
path: /healthz
port: 10249
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 80
- containerPort: 443
args:
- /nginx-ingress-controller
- --default-backend-service=kube-system/default-http-backend
- --nginx-configmap=kube-system/nginx
volumeMounts:
- name: nginx-template
mountPath: /etc/nginx/template
volumes:
- hostPath:
path: /etc/nginx
name: nginx-template
KUBECONFIG
cat > /etc/kubernetes/addons/nginx/nginx-svc.yaml <<KUBECONFIG
apiVersion: v1
kind: Service
metadata:
name: nginx
namespace: kube-system
spec:
type: NodePort
externalIPs:
- 172.20.0.2
ports:
- port: 80
name: http
- port: 443
name: https
selector:
app: nginx
KUBECONFIG
cat > /etc/kubernetes/addons/nginx/default-http-backend-svc.yaml <<KUBECONFIG
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: kube-system
spec:
ports:
- port: 80
targetPort: 8080
protocol: TCP
selector:
app: default-http-backend
KUBECONFIG
cat > /etc/kubernetes/addons/nginx/default-http-backend-deployment.yaml <<KUBECONFIG
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: default-http-backend
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: default-http-backend
spec:
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
KUBECONFIG
cat > /etc/kubernetes/master-kubeconfig.yaml <<KUBECONFIG
apiVersion: v1
kind: Config
clusters:
- name: kube
cluster:
certificate-authority: /etc/kubernetes/ca.crt
server: https://${ADVERTISE_IP}:8443
users:
- name: kubelet
user:
client-certificate: /etc/kubernetes/server.cert
client-key: /etc/kubernetes/server.key
contexts:
- context:
cluster: kube
user: kubelet
name: kubelet-context
current-context: kubelet-context
KUBECONFIG
cat > /etc/kubernetes/cni/net.d/10-calico.conf <<CALICOCNI
{
"name": "calico-k8s-network",
"type": "calico",
"log_level": "${POL_LOGLEVEL}",
"etcd_authority": "${ETCD_AUTHORITY}",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "http://127.0.0.1:8080/api/v1/"
}
}
CALICOCNI
# cat > /etc/kubernetes/cni/net.d/10-calico.conf <<CALICOCNI
# {"bridge":"cbr0","ipMasq":true,"ipam":{"routes":[{"dst":"0.0.0.0/0"}],"subnet":"10.240.1.0/24","type":"host-local"},"isGateway":true,"name":"kubenet","type":"bridge"}
# CALICOCNI
# cat > /etc/kubernetes/cni/net.d/10-calico.conf <<CALICOCNI
# {
# "name": "calico-k8s-network",
# "type": "calico",
# "log_level": "${POL_LOGLEVEL}",
# "ipam": {
# "type": "calico-ipam"
# },
# "policy": {
# "type": "k8s",
# "k8s_api_root": "http://127.0.0.1:8080/api/v1/"
# }
# }
# CALICOCNI
if [ ! -d /var/lib/kubelet/pods ]
then
echo start server
#start admin docker
#docker run --name admin -d -v /etc/kubernetes/ssl:/data oondeo/kubectl
#start master
# mount -o noatime,bind /var/lib/kubelet /var/lib/kubelet
# mount --make-shared /var/lib/kubelet
# --volume=/dev:/dev:rw \
# --volume=/:/rootfs:ro \
# --volume=/sys:/sys:rw \
# --volume=/opt/cni:/opt/cni:ro \
docker run -d \
--restart always --name kubemaster \
-e ETCD_ENDPOINTS="${ETCD_ENDPOINTS}" -e IP="${ADVERTISE_IP}"\
-e CALICO_CTL_CONTAINER='TRUE' -e CALICO_NETWORKING='true' \
-e FELIX_FELIXHOSTNAME="${ADVERTISE_IP}" -e FELIX_ETCDENDPOINTS="${ETCD_ENDPOINTS}" \
--volume=/etc/localtime:/etc/localtime:ro \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/etc/ssl/certs:/etc/ssl/certs:ro \
--volume=/var/lib/docker:/var/lib/docker:rw \
--volume=/var/lib/kubelet:/var/lib/kubelet:shared \
--volume=/:/rootfs:ro \
--volume=/sys:/sys:ro \
--volume=/dev:/dev:ro \
--volume=/var/log:/var/log:rw \
--volume=/var/run:/var/run:rw \
--net=host \
--pid=host \
--privileged \
${HYPERKUBE_IMAGE} \
/hyperkube kubelet \
--pod-infra-container-image=${INFRA_IMAGE} \
--allow-privileged=true \
--api-servers=http://127.0.0.1:8080 \
--config=/etc/kubernetes/manifests \
--hostname-override=${ADVERTISE_IP} \
--network-plugin-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--address=0.0.0.0 \
--cluster-dns=${DNS_SERVICE_IP} \
--cluster-domain=$DNS_DOMAIN \
--v=${LOGLEVEL}
# --logtostderr=true \
#--network-plugin-dir=/etc/kubernetes/cni/net.d \
#--network-plugin=cni \
#--address=0.0.0.0 \
#--hostname-override=${ADVERTISE_IP} \
#--tls-cert-file=/etc/kubernetes/ssl/server.cert \
#--tls-private-key-file=/etc/kubernetes/ssl/server.key \
#--kubeconfig=/etc/kubernetes/master-kubeconfig.yaml \
#--register-node=true \
#--register-schedulable=true \
#--enable-server \
# kubelet \
# --pod-infra-container-image=${INFRA_IMAGE} \
# --allow-privileged=true \
# --api-servers=http://127.0.0.1:8080 \
# --config=/etc/kubernetes/manifests \
# --hostname-override=${ADVERTISE_IP} \
# --network-plugin-dir=/etc/cni/net.d \
# --network-plugin=cni \
# --address=0.0.0.0 \
# --cluster-dns=${DNS_SERVICE_IP} \
# --cluster-domain=$DNS_DOMAIN \
# --logtostderr=true \
# --v=${LOGLEVEL}
#--network-plugin-dir=/etc/kubernetes/cni/net.d \
#--network-plugin=cni \
#--address=0.0.0.0 \
#--hostname-override=${ADVERTISE_IP} \
#--tls-cert-file=/etc/kubernetes/ssl/server.cert \
#--tls-private-key-file=/etc/kubernetes/ssl/server.key \
#--kubeconfig=/etc/kubernetes/master-kubeconfig.yaml \
#--register-node=true \
#--register-schedulable=true \
#--enable-server \
sleep 60
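# A fixed sleep works, but polling the API server is more robust (a sketch,
# assumes curl on the host):
# until curl -sf http://127.0.0.1:8080/healthz >/dev/null; do sleep 5; done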
# cd /root
# docker cp kubectl kubemaster:/usr/local/bin/kubectl
# docker cp calicoctl kubemaster:/usr/local/bin/calicoctl
# docker cp policy kubemaster:/usr/local/bin/policy
# #calicoctl node --runtime=docker --node-image=calico/node:${CALICO_VERSION}
# calicoctl node --runtime=none
# docker run -d --name calico-node --net=host --privileged --restart=always -e ETCD_SCHEME="http" \
# -e ETCD_AUTHORITY=${ETCD_AUTHORITY} -e IP=${ADVERTISE_IP} -e CALICO_NETWORKING=true -e HOSTNAME="${ADVERTISE_IP}" \
# -e FELIX_ETCDENDPOINTS=${ETCD_ENDPOINTS} -e FELIX_FELIXHOSTNAME=${ADVERTISE_IP} \
# -v /etc/localtime:/etc/localtime:ro -v /lib/modules:/lib/modules:ro \
# -v /var/run/calico:/var/run/calico:rw -v /var/log/calico:/var/log/calico:rw \
# calico/node:${CALICO_VERSION}
# docker run -d --name calico-node --net=host --privileged --restart=always -e ETCD_SCHEME="http" \
# -e ETCD_AUTHORITY=${ETCD_AUTHORITY} -e IP=${ADVERTISE_IP} -e CALICO_NETWORKING=true -e HOSTNAME="${ADVERTISE_IP}" \
# -e FELIX_ETCDADDR=${ETCD_AUTHORITY} -e FELIX_ETCDSCHEME="http" -e FELIX_FELIXHOSTNAME=${ADVERTISE_IP} \
# -v /etc/localtime:/etc/localtime:ro -v /lib/modules:/lib/modules:ro \
# -v /var/run/calico:/var/run/calico:rw -v /var/log/calico:/var/log/calico:rw \
# calico/node:${CALICO_VERSION}
calicoctl pool add ${POD_NETWORK} --nat-outgoing
calicoctl node --ip=${ADVERTISE_IP}
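# Verify that the pool and the calico node registered correctly:
# calicoctl pool show
# calicoctl status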
# curl -H "Content-Type: application/json" -XPOST -d'{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}' "http://127.0.0.1:8080/api/v1/namespaces"
# curl -H "Content-Type: application/json" -XPOST -d'{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"calico-system"}}' "http://127.0.0.1:8080/api/v1/namespaces"
# curl -H "Content-Type: application/json" -XPOST http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/default/thirdpartyresources --data-binary @- <<BODY
# {
# "kind": "ThirdPartyResource",
# "apiVersion": "extensions/v1beta1",
# "metadata": {
# "name": "network-policy.net.alpha.kubernetes.io"
# },
# "description": "Specification for a network isolation policy",
# "versions": [
# {
# "name": "v1alpha1"
# }
# ]
# }
# BODY
fi
kubectl config set-cluster default-cluster --server=http://127.0.0.1:8080
kubectl config set-credentials default-admin
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
kubectl config use-context default-system
cat <<RUNCONF
kubectl config set-cluster default-cluster --server=https://${ADVERTISE_IP}:8443 --certificate-authority=ca.crt
kubectl config set-credentials default-admin --certificate-authority=ca.crt --client-key=kubecfg.key --client-certificate=kubecfg.crt
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
kubectl config use-context default-system
kubectl run -i --tty busybox --image=busybox --restart=Never
RUNCONF
#-------------------------------------node-------------------------------------------
exit 0
#start container
# Start kubelet & proxy in container
# TODO: Use secure port for communication
docker run \
--net=host \
--pid=host \
--privileged \
--restart=always \
-d \
-v /sys:/sys:ro \
-v /var/run:/var/run:rw \
-v /:/rootfs:ro \
-v ${DOCKER_DIR}:/var/lib/docker:rw \
-v ${DOCKER_DIR}:${DOCKER_DIR}:rw \
-v /var/lib/kubelet/:/var/lib/kubelet:rw \
gcr.io/google_containers/hyperkube:${K8S_VERSION} \
/hyperkube kubelet \
--allow-privileged=true \
--api-servers=http://${ADVERTISE_IP}:8080 \
--address=0.0.0.0 \
--enable-server \
--cluster-dns=${DNS_SERVICE_IP} \
--cluster-domain=${DNS_DOMAIN} \
--containerized \
--v=${LOGLEVEL}
docker run \
-d \
--net=host \
--privileged \
--restart=always \
gcr.io/google_containers/hyperkube:${K8S_VERSION} \
/hyperkube proxy \
--master=http://${ADVERTISE_IP}:8080 \
--v=${LOGLEVEL}
# If the kube-system namespace isn't already created, create it
kubectl get ns
kubectl create namespace kube-system
kubectl create -f ./skydns.yaml
#---------------------oldapiserver---------------------------------------------
cat > /etc/kubernetes/manifests/kube-apiserver.yaml <<KUBEAPISERVER
apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: ${HYPERKUBE_IMAGE}
command:
- /hyperkube
- apiserver
- --bind-address=0.0.0.0
- --etcd-servers=${ETCD_ENDPOINTS}
- --service-cluster-ip-range=${SERVICE_IP_RANGE}
- --secure-port=8443
- --advertise-address=${ADVERTISE_IP}
- --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
- --tls-cert-file=/etc/kubernetes/ssl/server.cert
- --tls-private-key-file=/etc/kubernetes/ssl/server.key
- --client-ca-file=/etc/kubernetes/ssl/ca.crt
- --service-account-key-file=/etc/kubernetes/ssl/server.key
- --basic-auth-file=/etc/kubernetes/basic_auth.csv
- --token-auth-file=/etc/kubernetes/known_tokens.csv
- --allow-privileged=true
- --v=${LOGLEVEL}
ports:
- containerPort: 8443
hostPort: 8443
name: https
- containerPort: 8080
hostPort: 8080
name: local
volumeMounts:
- mountPath: /etc/kubernetes
name: ssl-certs-kubernetes
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: /etc/localtime
name: localtime-host
readOnly: true
volumes:
- hostPath:
path: /etc/localtime
name: localtime-host
- hostPath:
path: /etc/kubernetes
name: ssl-certs-kubernetes
- hostPath:
path: /etc/ssl/certs
name: ssl-certs-host
KUBEAPISERVER
#-----------------------------client----------------------------
kubectl config set-cluster default-cluster --server=https://172.20.0.2:8443 --certificate-authority=ca.crt
kubectl config set-credentials default-admin --certificate-authority=ca.crt --client-key=kubecfg.key --client-certificate=kubecfg.crt
kubectl config set-context default-system --cluster=default-cluster --user=default-admin
kubectl config use-context default-system
kubectl run -i --tty busybox --image=busybox --restart=Never
#docker run --net=host -v /etc/localtime:/etc/localtime:ro -v /:/rootfs:ro -it --rm --name prueba oondeo/kubectl --server http://127.0.0.1:8080 get nodes