Install an all-in-one Kubernetes with kubeadm on ubuntu and use multus-cni to create a multi-homed pod
#!/bin/bash
set -ex
IF_to_switch_VTEP=net1
VTEP=vxlan4090
bridge=swbridge
veth_a=eth40
veth_b=eth50
MAC_of_switch_VTEP=b6:14:ca:80:35:fa
IP_of_switch_VTEP=172.16.248.4
# clear existing state (in case you need to rerun this); ignore links that do not exist yet
for l in $VTEP $bridge $veth_b $veth_a ; do ip link delete $l || true ; done
sleep 1
# add the static neighbor entry below only when directly connected to the switch
# (the VTEP on the switch will not answer ARP)
ip link set $IF_to_switch_VTEP down
sleep 1
ip neighbor add $IP_of_switch_VTEP lladdr $MAC_of_switch_VTEP dev $IF_to_switch_VTEP nud permanent
sleep 1
ip link set $IF_to_switch_VTEP up
# bridge
ip link add name $bridge type bridge vlan_filtering 1
bridge vlan add vid 4090 dev $bridge self
ip link set $bridge up
sleep 1
# create the VXLAN tunnel endpoint (vxlan4090)
ip link add $VTEP type vxlan id 14090 dstport 4789 remote $IP_of_switch_VTEP
sleep 1
ip link set $VTEP mtu 1300
ip link set $VTEP master $bridge
bridge vlan add vid 4090 dev $VTEP master pvid untagged
bridge vlan del vid 1 dev $VTEP
ip link set $VTEP up
sleep 1
# create veth pair
ip link add $veth_a type veth peer name $veth_b
sleep 1
# add veth_a to swbridge
ip link set $veth_a master $bridge
sleep 1
# add vid 4090 on veth_a and drop the default vid 1
bridge vlan add vid 4090 dev $veth_a
bridge vlan del vid 1 dev $veth_a
sleep 1
# bring up the interfaces
ip link set $veth_a up
ip link set $veth_b up
sleep 1
# create a tagged vid 4090 sub-interface on veth_b
ip link add link $veth_b name ${veth_b}.c4090 type vlan protocol 802.1Q id 4090
ip link set ${veth_b}.c4090 up
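
To sanity-check the plumbing above, a few read-only queries can be run afterwards (a minimal sketch; the exact output depends on your switch-side configuration):

bridge vlan show                # vid 4090 should be listed on swbridge, vxlan4090 and eth40
ip -d link show $VTEP           # should show VNI 14090 and remote 172.16.248.4
ip link show ${veth_b}.c4090    # the tagged sub-interface facing the container side
bridge fdb show dev $VTEP       # learned MAC entries appear here once traffic flows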
#!/bin/bash
# This script provisions Kubernetes with kubeadm, using Docker as the
# container engine and Calico as the network add-on. The commands are copied
# from the official guide:
# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
set -ex
swapoff -a
sed -i '/swap/d' /etc/fstab
apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
apt-get update && apt-get install -y docker-ce=18.06.2~ce~3-0~ubuntu
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl restart docker
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
kubeadm init --pod-network-cidr=192.168.0.0/16 # calico
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml # calico
KUBECONFIG=/etc/kubernetes/admin.conf kubectl taint nodes --all node-role.kubernetes.io/master-
mkdir -p /home/ubuntu/.kube
cp -i /etc/kubernetes/admin.conf /home/ubuntu/.kube/config
chown ubuntu:ubuntu /home/ubuntu/.kube/config
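
Once the script finishes, the single-node cluster can be verified along these lines (a sketch, assuming you run it as the ubuntu user set up above):

export KUBECONFIG=/home/ubuntu/.kube/config
kubectl get nodes                        # the node should turn Ready once Calico is up
kubectl -n kube-system get pods -o wide  # calico-node and the control-plane pods should be Running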
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-conf
  namespace: voltha
spec:
  config: '{
      "cniVersion": "0.3.1",
      "type": "macvlan",
      "master": "eth0",
      "mode": "bridge",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.1.0/24",
        "rangeStart": "192.168.1.200",
        "rangeEnd": "192.168.1.216",
        "routes": [
          { "dst": "0.0.0.0/0" }
        ],
        "gateway": "192.168.1.1"
      }
    }'
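
Assuming the definition above is saved as macvlan-conf.yaml (a hypothetical file name) and the Multus CRD from the manifest below has already been created, it can be applied and inspected like this:

kubectl create namespace voltha          # the NetworkAttachmentDefinition lives in the voltha namespace
kubectl apply -f macvlan-conf.yaml
kubectl -n voltha get network-attachment-definitions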
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: network-attachment-definitions.k8s.cni.cncf.io
spec:
  group: k8s.cni.cncf.io
  version: v1
  scope: Namespaced
  names:
    plural: network-attachment-definitions
    singular: network-attachment-definition
    kind: NetworkAttachmentDefinition
    shortNames:
    - net-attach-def
  validation:
    openAPIV3Schema:
      properties:
        spec:
          properties:
            config:
              type: string
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: multus
rules:
- apiGroups: ["k8s.cni.cncf.io"]
  resources:
  - '*'
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - pods
  - pods/status
  verbs:
  - get
  - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: multus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: multus
subjects:
- kind: ServiceAccount
  name: multus
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: multus
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: multus-cni-config
  namespace: kube-system
  labels:
    tier: node
    app: multus
data:
  # NOTE: If you'd prefer to manually apply a configuration file, you may create one here.
  # To customize the Multus installation, change the "args" line of the Multus pod below from
  #   - "--multus-conf-file=auto"
  # to:
  #   - "--multus-conf-file=/tmp/multus-conf/70-multus.conf"
  # Additionally, ensure that "70-multus.conf" is the alphabetically first name in the
  # /etc/cni/net.d/ directory on each node; otherwise it will not be used by the kubelet.
  cni-conf.json: |
    {
      "name": "multus-cni-network",
      "type": "multus",
      "capabilities": {
        "portMappings": true
      },
      "delegates": [
        {
          "name": "k8s-pod-network",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "calico",
              "log_level": "info",
              "datastore_type": "kubernetes",
              "nodename": "vagrant",
              "mtu": 1440,
              "ipam": {
                "type": "calico-ipam"
              },
              "policy": {
                "type": "k8s"
              },
              "kubernetes": {
                "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
              }
            },
            {
              "type": "portmap",
              "snat": true,
              "capabilities": {"portMappings": true}
            }
          ]
        }
      ],
      "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-multus-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: multus
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        tier: node
        app: multus
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: multus
      containers:
      - name: kube-multus
        image: nfvpe/multus:v3.2
        command: ["/entrypoint.sh"]
        args:
        - "--multus-conf-file=auto"
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni
          mountPath: /host/etc/cni/net.d
        - name: cnibin
          mountPath: /host/opt/cni/bin
        - name: multus-cfg
          mountPath: /tmp/multus-conf
      volumes:
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: cnibin
        hostPath:
          path: /opt/cni/bin
      - name: multus-cfg
        configMap:
          name: multus-cni-config
          items:
          - key: cni-conf.json
            path: 70-multus.conf
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-multus-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: multus
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        tier: node
        app: multus
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: ppc64le
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: multus
      containers:
      - name: kube-multus
        # ppc64le support requires multus:latest for now. support 3.3 or later.
        image: nfvpe/multus:latest-ppc64le
        command: ["/entrypoint.sh"]
        args:
        - "--multus-conf-file=auto"
        resources:
          requests:
            cpu: "100m"
            memory: "90Mi"
          limits:
            cpu: "100m"
            memory: "90Mi"
        securityContext:
          privileged: true
        volumeMounts:
        - name: cni
          mountPath: /host/etc/cni/net.d
        - name: cnibin
          mountPath: /host/opt/cni/bin
        - name: multus-cfg
          mountPath: /tmp/multus-conf
      volumes:
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: cnibin
        hostPath:
          path: /opt/cni/bin
      - name: multus-cfg
        configMap:
          name: multus-cni-config
          items:
          - key: cni-conf.json
            path: 70-multus.conf
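
A sketch for rolling out the Multus manifest above (the file name multus-daemonset.yml is an assumption) and checking that Multus generated its delegating CNI config on the node:

kubectl apply -f multus-daemonset.yml
kubectl -n kube-system get pods -l app=multus   # one kube-multus pod per node
ls /etc/cni/net.d/                              # with --multus-conf-file=auto, a 00-multus.conf should sort first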
apiVersion: apps/v1
kind: StatefulSet
metadata:
  generation: 2
  name: vcore
  namespace: voltha
spec:
  podManagementPolicy: OrderedReady
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: vcore
  serviceName: vcore
  template:
    metadata:
      annotations:
        cni: calico
        k8s.v1.cni.cncf.io/networks: macvlan-conf
      creationTimestamp: null
      labels:
        app: vcore
    spec:
      containers:
      - args:
        - voltha/voltha/main.py
        - --etcd=etcd-cluster.default.svc.cluster.local:2379
        - --kafka=cord-platform-kafka.default.svc.cluster.local
        - --rest-port=8880
        - --grpc-port=50556
        - --interface=eth1
        - --backend=etcd
        - --pon-subnet=10.38.0.0/12
        - --ponsim-comm=grpc
        - --core-number-extractor=^.*-([0-9]+)_.*$
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: voltha/voltha-voltha:1.6.0
        imagePullPolicy: Always
        name: voltha
        ports:
        - containerPort: 8880
          name: rest-port
          protocol: TCP
        - containerPort: 18880
          name: mystery-port
          protocol: TCP
        - containerPort: 50556
          name: grpc-port
          protocol: TCP
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        volumeMounts:
        - mountPath: /voltha/voltha/logconfig.yml
          name: vcore-logconfig-vol
          subPath: logconfig.yml
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: voltha-serviceaccount
      serviceAccountName: voltha-serviceaccount
      terminationGracePeriodSeconds: 0
      volumes:
      - configMap:
          defaultMode: 420
          items:
          - key: logconfig
            path: logconfig.yml
          name: vcore-logconfig
        name: vcore-logconfig-vol
  updateStrategy:
    type: RollingUpdate
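
After applying the StatefulSet, the vcore-0 pod should come up multi-homed: eth0 from Calico plus a net1 interface from the macvlan-conf attachment. A quick check (sketch):

kubectl -n voltha exec vcore-0 -- ip addr show   # expect eth0 (Calico) and net1 (macvlan, from the 192.168.1.200-216 range)
kubectl -n voltha get pod vcore-0 -o jsonpath='{.metadata.annotations.k8s\.v1\.cni\.cncf\.io/networks-status}'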