Wrapper to setup an RKE cluster with TungstenFabric as the CNI
#!/usr/bin/env bash
#
# Run this as root on the first master node. You must be able to ssh as the root user to each node via ssh keys
# installed at /root/.ssh/ on the first master node. The public ssh key MUST be in the /root/.ssh/authorized_keys
# file on ALL nodes including the first master. Use "ssh-keygen" to create an ssh keypair and use "ssh-copy-id NODE_IP"
# to distribute the public key to ALL nodes.
#
# The following commands prepare a generic EC2 or GCE instance and run the script.
# # enter an interactive sudo session
# sudo -i
# # create a keypair, accept all defaults
# ssh-keygen
# # add the public ssh key to the local machine
# cat .ssh/id_rsa.pub >> .ssh/authorized_keys
# # replace the /etc/resolv.conf file
# rm /etc/resolv.conf ; echo 'nameserver 8.8.8.8' > /etc/resolv.conf
# # distribute the public key to all other nodes
# ssh-copy-id NODE_IP
# # chmod and run the script
# chmod +x /root/rke-tf-installer.sh ; /root/rke-tf-installer.sh
#
# For a single node test environment use an instance with 8 vcpu, 32 GB of RAM, and 30 GB of disk.
#
# This script will create config files based on the variables below and then run an ansible playbook. The playbook
# will deploy an RKE Kubernetes cluster using TungstenFabric as the CNI.
#
# This script requires a fresh install of either Ubuntu 16.04, Ubuntu 18.04, or Ubuntu 20.04 on ALL nodes.
# All nodes MUST be running the same OS image.
#
# The TungstenFabric control plane will run on worker nodes. The ip or ips of the TungstenFabric control plane
# node(s) must be included in both the "tf_cp_ip" variable and the "k8s_worker_ip" variable. The TungstenFabric
# control plane worker nodes are shared and will also run normal Kubernetes workloads by default. Use taints and
# tolerations to dedicate nodes to the TungstenFabric control plane.
#
# You MUST use an ODD number of nodes for the "k8s_etcd_ip" variable. The etcd nodes can be master nodes or
# dedicated etcd nodes.
#
# If you do not have a load balancer for your Kubernetes masters then set "k8s_api_lb" to the ip of the first
# master node. If you want to create a local load balancer, set haproxy="true" and haproxy/heartbeat will be
# deployed using the "k8s_api_lb" ip as the VIP for the master nodes.
#
# The example node variables below use nine nodes: three Kubernetes master nodes that also run etcd, and six
# Kubernetes worker nodes, three of which also host the TungstenFabric control plane. You must specify at least
# one ip for each of the following five variables. If you want to use a single node for testing, use the same ip
# for each of the five variables.
#
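# For example, a single node test box at ip 10.9.8.21 (hypothetical) would set all five node
# variables to the same ip:
# k8s_cp_ip="10.9.8.21"
# k8s_etcd_ip="10.9.8.21"
# k8s_worker_ip="10.9.8.21"
# tf_cp_ip="10.9.8.21"
# k8s_api_lb="10.9.8.21"
#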
#Node role variables
########################################################################################################################
#Kubernetes master node ip. This is the ip or a comma-separated list of ips for nodes that will be Kubernetes masters.
#By default non-master workloads will NOT run on these nodes. Masters can also run etcd if you add them to
#the "k8s_etcd_ip" variable.
#Specify multiple ips via 1.1.1.1,2.2.2.2,3.3.3.3
k8s_cp_ip="10.9.8.21,10.9.8.22,10.9.8.23"
#Kubernetes etcd node ip. This is the ip or a comma-separated list of ips for nodes that will be Kubernetes etcd hosts.
#You MUST use an ODD number of nodes for this variable. The etcd nodes can be master nodes or dedicated etcd nodes.
#You must specify at least one node ip.
#Specify multiple ips via 1.1.1.1,2.2.2.2,3.3.3.3
k8s_etcd_ip="10.9.8.21,10.9.8.22,10.9.8.23"
#Kubernetes worker node ip. This is the ip or a comma-separated list of ips for nodes that will be Kubernetes workers.
#Specify multiple ips via 1.1.1.1,2.2.2.2,3.3.3.3
k8s_worker_ip="10.9.8.24,10.9.8.25,10.9.8.26,10.9.8.27,10.9.8.28,10.9.8.29"
#TungstenFabric control node ip. This is the ip or a comma-separated list of ips of the worker nodes that the
#TungstenFabric control plane will run on. You must specify at least one worker node ip. These worker nodes will also
#run normal Kubernetes workloads. All of the ips MUST also be included in the "k8s_worker_ip" variable.
#Specify multiple ips via 1.1.1.1,2.2.2.2,3.3.3.3
tf_cp_ip="10.9.8.24,10.9.8.25,10.9.8.26"
#Kubernetes api load balancer ip. If you don't have an external api load balancer, specify the ip of the first
#master node. Used as the VIP for haproxy/heartbeat if haproxy="true".
k8s_api_lb="10.9.8.21"
#Advanced variables.
########################################################################################################################
#Setup haproxy/heartbeat on master nodes. Default is "false"; set to "true" to install the haproxy/heartbeat load
#balancer. You must have more than one master node for this to work.
haproxy="false"
#Heartbeat preshared key. Only used if haproxy="true"
heartbeat_pw="heartbeatpa55word"
#Kubernetes pod subnet. If you modify this variable you will also need to modify the "pod_net_dns" variable.
k8s_pod_subnet="10.32.0.0/12"
#Kubernetes service subnet. If you modify this variable you will also need to modify the "svc_net_dns" variable.
k8s_svc_subnet="10.96.0.0/12"
#Pod network dns ip. Make sure this ip is in the "k8s_pod_subnet" CIDR.
#Use the 3rd ip from the top of the CIDR, like X.X.X.253
pod_net_dns="10.47.255.253"
#Service network dns ip. Make sure this ip is in the "k8s_svc_subnet" CIDR.
#Use the 3rd ip from the bottom of the CIDR, like X.X.X.3
svc_net_dns="10.96.0.3"
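#As a hypothetical example, with k8s_pod_subnet="192.168.0.0/16" the pod dns ip would be
#"192.168.255.253", and with k8s_svc_subnet="10.200.0.0/16" the service dns ip would be "10.200.0.3".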
#TungstenFabric ip-fabric subnet.
tf_ipfabric_subnet="10.64.0.0/12"
#Kubernetes cluster FQDN.
k8s_cluster_fqdn="cluster.local"
#Registry to download images from.
registry="docker.io/opencontrailnightly"
#Container image tag. Set to "latest" for the nightly builds.
tag="1912-latest"
#Branch for the contrail-container-builder git clone.
branch="R1912"
#Ntp pool for nodes.
ntp_pool="pool.ntp.org"
#TungstenFabric floating ip virtual network settings.
#Virtual network name.
tf_fip_net_name="external-fip"
#Virtual network subnet.
tf_fip_subnet="10.10.10.0"
#Virtual subnet mask bits.
tf_fip_snbits="24"
#TungstenFabric api load balancer ip. The default is the first ip in the "tf_cp_ip" variable.
tf_api_lb=$(echo ${tf_cp_ip} | tr ',' ' ' | awk '{print $1}')
#Leave the rest alone unless you know what you are doing :)
########################################################################################################################
#Coredns sed line
coredns_line="        forward . $pod_net_dns"
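#The leading spaces matter: this whole line is sed-substituted into the coredns Corefile
#later, so its indentation has to match the surrounding configmap yaml.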
#Setup node variables
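#Build a deduplicated union of every node ip. The *_nc ("no comma") variants hold the same
#lists with commas replaced by spaces so they can be iterated in for loops.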
all_nodes_raw="$k8s_cp_ip,$k8s_worker_ip,$k8s_etcd_ip"
all_nodes_raw_nc=$(echo ${all_nodes_raw} | tr ',' ' ')
all_nodes=$(echo "${all_nodes_raw_nc}" | tr ' ' '\n' | sort | uniq | tr '\n' ',' | sed '$s/,$//')
all_nodes_nc=$(echo ${all_nodes} | tr ',' ' ')
k8s_cp_nc=$(echo ${k8s_cp_ip} | tr ',' ' ')
k8s_etcd_nc=$(echo ${k8s_etcd_ip} | tr ',' ' ')
k8s_worker_nc=$(echo ${k8s_worker_ip} | tr ',' ' ')
#Create /root/rke-tf folder and sub folders
mkdir -p /root/rke-tf/vnc_api/vnc_api /root/rke-tf/vnc_api/cfgm_common
#Add each node's host key to known_hosts, then install python and the iptables tools on all nodes
for host in ${all_nodes_nc}; do
  ssh-keyscan -H ${host} >> /root/.ssh/known_hosts
done
for host in ${all_nodes_nc}; do
  ssh -l root ${host} apt-get install python2.7 iptables arptables ebtables -y
done
#Symlink python2.7 to python on this node if the link is missing
[[ ! -f /usr/bin/python ]] && ln -s /usr/bin/python2.7 /usr/bin/python
#Install cowsay, wget, and python-pip
apt-get update
apt-get install cowsay wget python-pip -y
#Install ansible
pip install ansible
#Pip install the future package
pip install future
#Pip install requests
pip install requests
/usr/games/cowsay -ftux "Creating config files"
#Create /root/rke-tf/cluster.yml
echo "nodes:" > /root/rke-tf/cluster.yml
k8s_cp_role=$'\n  - controlplane'
k8s_etcd_role=$'\n  - etcd'
k8s_worker_role=$'\n  - worker'
for host in ${all_nodes_nc}; do
  hostname=$(ssh -l root ${host} hostname)
  roles=''
  #Use whole-word matches so an ip like 10.9.8.2 cannot match inside 10.9.8.21
  cp_role_test=$(echo ${k8s_cp_nc} | grep -ow ${host})
  etcd_role_test=$(echo ${k8s_etcd_nc} | grep -ow ${host})
  worker_role_test=$(echo ${k8s_worker_nc} | grep -ow ${host})
  if [[ "${cp_role_test}" == "${host}" ]]; then
    roles="${roles}${k8s_cp_role}"
  fi
  if [[ "${etcd_role_test}" == "${host}" ]]; then
    roles="${roles}${k8s_etcd_role}"
  fi
  if [[ "${worker_role_test}" == "${host}" ]]; then
    roles="${roles}${k8s_worker_role}"
  fi
  cat << EOF >> /root/rke-tf/cluster.yml
- address: ${host}
  port: "22"
  role:${roles}
  hostname_override: ${hostname}
  user: root
  docker_socket: /var/run/docker.sock
  ssh_key_path: ~/.ssh/id_rsa
EOF
done
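#With the example variables above, the rendered entry for 10.9.8.21 would look like
#this (hostname hypothetical):
# - address: 10.9.8.21
#   port: "22"
#   role:
#   - controlplane
#   - etcd
#   hostname_override: node1
#   user: root
#   docker_socket: /var/run/docker.sock
#   ssh_key_path: ~/.ssh/id_rsa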
cat << EOF >> /root/rke-tf/cluster.yml
services:
  kube-api:
    service_cluster_ip_range: ${k8s_svc_subnet}
    pod_security_policy: false
    always_pull_images: false
  kube-controller:
    cluster_cidr: ${k8s_pod_subnet}
    service_cluster_ip_range: ${k8s_svc_subnet}
  kubelet:
    cluster_domain: ${k8s_cluster_fqdn}
    cluster_dns_server: ${svc_net_dns}
    fail_swap_on: false
network:
  plugin: none
authentication:
  strategy: x509
  sans:
  - "${k8s_api_lb}"
ssh_key_path: ~/.ssh/id_rsa
ssh_agent_auth: false
authorization:
  mode: rbac
ignore_docker_version: false
EOF
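#"plugin: none" keeps rke from deploying its own CNI; the TungstenFabric manifest
#applied later in the playbook provides the CNI instead.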
#Create /root/rke-tf/common.env
cat << EOF > /root/rke-tf/common.env
CONTRAIL_REGISTRY=${registry}
CONTRAIL_CONTAINER_TAG=${tag}
KUBERNETES_API_SERVER=${k8s_api_lb}
KUBERNETES_API_SECURE_PORT=6443
CONTROLLER_NODES=${tf_cp_ip}
WEBUI_VIP=${tf_cp_ip}
ANALYTICS_NODES=${tf_cp_ip}
ANALYTICSDB_NODES=${tf_cp_ip}
ANALYTICS_ALARM_NODES=${tf_cp_ip}
ANALYTICS_SNMP_NODES=${tf_cp_ip}
KAFKA_NODES=${tf_cp_ip}
ZOOKEEPER_NODES=${tf_cp_ip}
AGENT_NODES=${all_nodes}
LOG_LEVEL=SYS_DEBUG
CLOUD_ORCHESTRATOR='kubernetes'
KUBERNETES_CLUSTER_NAME="k8s"
EOF
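#common.env is consumed later by contrail-container-builder's resolve-manifest.sh,
#which renders these values into the tf.yml manifest.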
#Create /root/rke-tf/inventory.ini | |
echo "[all]" > /root/rke-tf/inventory.ini | |
for host in ${all_nodes_nc}; do | |
hostname=$(ssh -l root ${host} hostname) | |
echo ${hostname}" ansible_host="${host} >> /root/rke-tf/inventory.ini | |
done | |
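#With the example variables above this yields one line per node (hostnames hypothetical), e.g.:
# [all]
# node1 ansible_host=10.9.8.21
# node2 ansible_host=10.9.8.22
# ...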
#Deploy haproxy/heartbeat if requested
if [[ "${haproxy}" == "true" ]]; then
  #Create ha config folder
  mkdir /root/rke-tf/ha
  #Create ha.authkeys file
  authkey=$(echo -n ${heartbeat_pw} | md5sum | awk '{print $1}')
  echo "auth 1" > /root/rke-tf/ha/ha.authkeys
  echo "1 md5 ${authkey}" >> /root/rke-tf/ha/ha.authkeys
  #Create haresources file
  ha_master_ip=$(echo ${k8s_cp_nc} | awk '{print $1}')
  ha_master_hostname=$(ssh -l root ${ha_master_ip} hostname)
  echo "${ha_master_hostname} ${k8s_api_lb}" > /root/rke-tf/ha/haresources
  #Create haproxy.cfg
  cat << EOF > /root/rke-tf/ha/haproxy.cfg
global
    user haproxy
    group haproxy
defaults
    mode http
    log global
    retries 2
    timeout connect 3000ms
    timeout server 5000ms
    timeout client 5000ms
frontend kubernetes
    bind ${k8s_api_lb}:6443
    option tcplog
    mode tcp
    default_backend kubernetes-master-nodes
backend kubernetes-master-nodes
    mode tcp
    balance roundrobin
    option tcp-check
EOF
  for host in ${k8s_cp_nc}; do
    hostname=$(ssh -l root ${host} hostname)
    echo "    server ${hostname} ${host}:6443 check fall 3 rise 2" >> /root/rke-tf/ha/haproxy.cfg
  done
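  #Each master gets a backend line like this (hostname hypothetical):
  #    server node1 10.9.8.21:6443 check fall 3 rise 2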
  #Create a heartbeat ha.cf file for each master node
  for host in ${k8s_cp_nc}; do
    hostname=$(ssh -l root ${host} hostname)
    interface=$(ssh -l root ${host} ip -o -4 route show to default | awk '{print $5}')
    cat << EOF > /root/rke-tf/ha/heartbeat.${hostname}.cf
keepalive 2
deadtime 10
udpport 694
bcast ${interface}
mcast ${interface} 225.0.0.1 694 1 0
ucast ${interface} ${host}
udp ${interface}
logfacility local0
EOF
    for ha_host in $(echo ${k8s_cp_ip} | tr ',' ' '); do
      ha_name=$(ssh -l root ${ha_host} hostname)
      echo "node ${ha_name}" >> /root/rke-tf/ha/heartbeat.${hostname}.cf
    done
    #Scp the ha.cf file to each master node
    ssh -l root ${host} mkdir -p /etc/ha.d
    scp /root/rke-tf/ha/heartbeat.${hostname}.cf root@${host}:/etc/ha.d/ha.cf
  done
  #Scp the other config files to each master node, then enable and start the services
  for host in ${k8s_cp_nc}; do
    ssh -l root ${host} mkdir -p /etc/haproxy
    ssh -l root ${host} apt-get install -y haproxy heartbeat
    scp /root/rke-tf/ha/haproxy.cfg root@${host}:/etc/haproxy/haproxy.cfg
    scp /root/rke-tf/ha/ha.authkeys root@${host}:/etc/ha.d/authkeys
    scp /root/rke-tf/ha/haresources root@${host}:/etc/ha.d/haresources
    ssh -l root ${host} chmod 600 /etc/ha.d/authkeys
    ssh -l root ${host} systemctl enable haproxy
    ssh -l root ${host} systemctl start haproxy
    ssh -l root ${host} systemctl enable heartbeat
    ssh -l root ${host} systemctl start heartbeat
  done
fi
#Create subnet file /root/rke-tf/vars.tf
cat << EOF > /root/rke-tf/vars.tf
  KUBERNETES_IP_FABRIC_SUBNETS: "${tf_ipfabric_subnet}"
  KUBERNETES_POD_SUBNETS: "${k8s_pod_subnet}"
  KUBERNETES_PUBLIC_FIP_POOL: "{'domain': 'default-domain', 'project': 'k8s-default', 'network': '${tf_fip_net_name}', 'name': 'default'}"
  KUBERNETES_SERVICE_SUBNETS: "${k8s_svc_subnet}"
EOF
#Create empty fip file /root/rke-tf/fip.tf
cat << EOF > /root/rke-tf/fip.tf
EOF
#Create FIP network python script
cat << EOF > /root/rke-tf/vnc_api/fip_network.py
#!/usr/bin/python
import time
import subprocess
import os
# give pods a head start
time.sleep(600)
# wait for the tf control plane pod to be ready
not_ready_cmd = "kubectl get pods -A | grep contrail-controller-control | awk '{print \$4}' | grep -vE 'Running|Completed'"
run_not_ready_cmd = subprocess.Popen(not_ready_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
not_ready = run_not_ready_cmd.communicate()[0]
while bool(not_ready):
    time.sleep(20)
    run_not_ready_cmd = subprocess.Popen(not_ready_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    not_ready = run_not_ready_cmd.communicate()[0]
# get the control pod id
pod_id_cmd = "kubectl get pods -n kube-system | grep contrail-controller-control | awk 'NR==1{print \$1}'"
run_pod_id_cmd = subprocess.Popen(pod_id_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
control_pod = run_pod_id_cmd.communicate()[0]
# copy the vnc_api and cfgm_common python packages out of the control pod
os.system("kubectl cp kube-system/{}:usr/lib/python2.7/site-packages/vnc_api /root/rke-tf/vnc_api/vnc_api".format(control_pod.rstrip()))
os.system("kubectl cp kube-system/{}:usr/lib/python2.7/site-packages/cfgm_common /root/rke-tf/vnc_api/cfgm_common".format(control_pod.rstrip()))
# move the packages into site-packages
os.system("cp -avr /root/rke-tf/vnc_api/vnc_api/ /usr/lib/python2.7/site-packages/")
os.system("cp -avr /root/rke-tf/vnc_api/cfgm_common/ /usr/lib/python2.7/site-packages/")
# wait until all tf pods are up
tf_not_ready_cmd = "kubectl get pods -A | grep contrail | awk '{print \$4}' | grep -vE 'Running|Completed'"
run_tf_not_ready_cmd = subprocess.Popen(tf_not_ready_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
tf_not_ready = run_tf_not_ready_cmd.communicate()[0]
while bool(tf_not_ready):
    time.sleep(20)
    run_tf_not_ready_cmd = subprocess.Popen(tf_not_ready_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    tf_not_ready = run_tf_not_ready_cmd.communicate()[0]
# import the vnc_api module
from vnc_api import vnc_api
# create the api client
vnc = vnc_api.VncApi(username="admin", password="contrail123", tenant_name="admin", api_server_host="${tf_api_lb}", auth_host="${tf_api_lb}")
# set tenant_name and read the project
tenant_name = "k8s-default"
prj = vnc.project_read(fq_name=['default-domain', tenant_name])
# set the new network name
vn_name = "${tf_fip_net_name}"
vn = vnc_api.VirtualNetwork(name=vn_name, parent_obj=prj)
# create the ipam
ipam = vnc_api.NetworkIpam(name=vn_name, parent_obj=prj)
vnc.network_ipam_create(ipam)
# set subnet variables
netaddr = "${tf_fip_subnet}"
netmask = "${tf_fip_snbits}"
# create the new network subnet
subnet = vnc_api.SubnetType(ip_prefix=netaddr, ip_prefix_len=netmask)
ipam_subnet = vnc_api.IpamSubnetType(subnet=subnet)
vn.set_network_ipam(ref_obj=ipam, ref_data=vnc_api.VnSubnetsType([ipam_subnet]))
# read and add the ip-fabric network policy
policy = vnc.network_policy_read(fq_name=['default-domain', 'k8s-default', 'k8s-default-ip-fabric-np'])
vn.add_network_policy(policy, vnc_api.VirtualNetworkPolicyType(sequence=vnc_api.SequenceType(0, 0)))
# set the new network attributes
vn.set_router_external(True)
vn.set_fabric_snat(True)
vn_params = vnc_api.VirtualNetworkType()
fwdmode = "l2"
rpf = "enable"
vn_params.set_forwarding_mode(fwdmode)
vn_params.set_rpf(rpf)
vn.set_virtual_network_properties(vn_params)
# create the new network
vnc.virtual_network_create(vn)
# create the fip pool
vn_fqdn = vnc.virtual_network_read(fq_name=['default-domain', tenant_name, vn_name])
fip_pool_name = "default"
pool = vnc_api.FloatingIpPool(name=fip_pool_name, parent_obj=vn_fqdn)
vnc.floating_ip_pool_create(pool)
# add a ref to the fip pool and update the project
prj.add_floating_ip_pool(pool)
vnc.project_update(prj)
EOF
#Create /root/rke-tf/playbook.yml
cat << EOF > /root/rke-tf/playbook.yml
---
- hosts: all
  vars:
    kernel_modules:
    - br_netfilter
    - ip6_udp_tunnel
    - ip_set
    - ip_set_hash_ip
    - ip_set_hash_net
    - iptable_filter
    - iptable_nat
    - iptable_mangle
    - iptable_raw
    - nf_conntrack_netlink
    - nf_conntrack
    - nf_conntrack_ipv4
    - nf_defrag_ipv4
    - nf_nat
    - nf_nat_ipv4
    - nf_nat_masquerade_ipv4
    - nfnetlink
    - udp_tunnel
    - veth
    - vxlan
    - x_tables
    - xt_addrtype
    - xt_conntrack
    - xt_comment
    - xt_mark
    - xt_multiport
    - xt_nat
    - xt_recent
    - xt_set
    - xt_statistic
    - xt_tcpudp
  tasks:
  - name: Ping nodes
    ping:
  - name: Update the /etc/hosts file with all node hostnames
    lineinfile:
      path: "/etc/hosts"
      regexp: "{{ hostvars[item]['ansible_env'].SSH_CONNECTION.split(' ')[2] }}\t{{ hostvars[item]['ansible_hostname'] }}\t{{ hostvars[item]['ansible_hostname'] }}"
      line: "{{ hostvars[item]['ansible_env'].SSH_CONNECTION.split(' ')[2] }}\t{{ hostvars[item]['ansible_hostname'] }}\t{{ hostvars[item]['ansible_hostname'] }}"
      state: present
      backup: yes
    register: etchostsupdate
    with_items: "{{ groups['all'] }}"
  - name: Load rke kernel modules
    modprobe:
      name: "{{ item }}"
      state: present
    with_items: "{{ kernel_modules }}"
    ignore_errors: true
  - name: Modify sysctl entries
    sysctl:
      name: '{{ item.key }}'
      value: '{{ item.value }}'
      sysctl_set: yes
      state: present
      reload: yes
    with_items:
    - {key: net.bridge.bridge-nf-call-ip6tables, value: 1}
    - {key: net.bridge.bridge-nf-call-iptables, value: 1}
    - {key: net.ipv4.ip_forward, value: 1}
    - {key: net.ipv4.ip_nonlocal_bind, value: 1}
  - name: Delete the old ntp.conf
    file:
      path: /etc/ntp.conf
      state: absent
    ignore_errors: true
  - name: Replace ntp.conf
    copy:
      dest: /etc/ntp.conf
      content: |
        driftfile /var/lib/ntp/ntp.drift
        pool ${ntp_pool} iburst
        restrict -4 default kod notrap nomodify nopeer noquery limited
        restrict -6 default kod notrap nomodify nopeer noquery limited
        restrict 127.0.0.1
        restrict ::1
        restrict source notrap nomodify noquery
    ignore_errors: true
  - name: Install ntp
    apt: name=ntp state=latest
    ignore_errors: true
  - name: Start ntp service
    shell: systemctl enable --now ntp
    ignore_errors: true
  - name: Install git
    apt: name=git state=latest
    delegate_to: 127.0.0.1
    run_once: true
  - name: Install docker.io
    apt: name=docker.io state=latest
  - name: Enable and start docker
    shell: systemctl enable --now docker
  - name: Set AllowTcpForwarding to yes in /etc/ssh/sshd_config
    shell: sed -i 's/#AllowTcpForwarding yes/AllowTcpForwarding yes/g' /etc/ssh/sshd_config && systemctl restart sshd
  - name: Turn swap off
    shell: swapoff -a ; sed -i '/swap/d' /etc/fstab
    ignore_errors: true
  - name: Disable firewall
    shell: ufw disable
    ignore_errors: true
  - name: Install kubectl
    shell: snap install kubectl --classic
    delegate_to: 127.0.0.1
    run_once: true
  - name: Download the rke binary
    shell: curl -L https://github.com/rancher/rke/releases/download/v0.3.0/rke_linux-amd64 -o /root/rke-tf/rke_linux-amd64
    delegate_to: 127.0.0.1
    run_once: true
  - name: Rename, chmod, move, and test the rke binary
    shell: mv /root/rke-tf/rke_linux-amd64 /root/rke-tf/rke ; chmod +x /root/rke-tf/rke ; mv /root/rke-tf/rke /usr/bin ; rke --version
    delegate_to: 127.0.0.1
    run_once: true
  - name: Running rke up. BE PATIENT, WAITING FOR THE COMMAND TO FINISH!
    shell: rke up --config /root/rke-tf/cluster.yml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Copy kubeconfig
    shell: mkdir /root/.kube ; cp /root/rke-tf/kube_config_cluster.yml /root/.kube/config
    delegate_to: 127.0.0.1
    run_once: true
  - name: Setup the contrail/ports/vm folder
    shell: docker exec kubelet mkdir -p /var/lib/contrail/ports/vm
  - name: Git clone the contrail-container-builder repo
    shell: git clone -b ${branch} https://github.com/Juniper/contrail-container-builder /root/rke-tf/contrail-container-builder
    delegate_to: 127.0.0.1
    run_once: true
  - name: Copy common.env into place
    shell: cp /root/rke-tf/common.env /root/rke-tf/contrail-container-builder/common.env
    delegate_to: 127.0.0.1
    run_once: true
  - name: Create the tf.yml manifest
    shell: /root/rke-tf/contrail-container-builder/kubernetes/manifests/resolve-manifest.sh /root/rke-tf/contrail-container-builder/kubernetes/manifests/contrail-standalone-kubernetes.yaml > /root/rke-tf/tf.yml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Sed the vars.tf file into tf.yml
    shell: sed -i '/.*KUBERNETES_API_SECURE_PORT.*/r /root/rke-tf/vars.tf' /root/rke-tf/tf.yml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Sed kernel-build-init into tf.yml
    shell: sed -i "s/kernel-init/kernel-build-init/" /root/rke-tf/tf.yml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Label nodes
    shell: /root/rke-tf/contrail-container-builder/kubernetes/manifests/set-node-labels.sh
    delegate_to: 127.0.0.1
    run_once: true
  - name: Install the tf.yml manifest
    shell: kubectl apply -f /root/rke-tf/tf.yml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Chmod +x /root/rke-tf/vnc_api/fip_network.py
    shell: chmod +x /root/rke-tf/vnc_api/fip_network.py
    delegate_to: 127.0.0.1
    run_once: true
  - name: Running the python script to provision the FIP network. Waiting for TF control plane pods to be ready. BE PATIENT, WAITING FOR THE COMMAND TO FINISH!
    shell: /root/rke-tf/vnc_api/fip_network.py
    delegate_to: 127.0.0.1
    run_once: true
  - name: Dump the coredns configmap to coredns-configmap.yaml
    shell: kubectl get configmap -n kube-system coredns -o yaml > /root/rke-tf/coredns-configmap.yaml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Sed the forward line into coredns-configmap.yaml
    shell: sed -i "s/.*forward.*/${coredns_line}/" /root/rke-tf/coredns-configmap.yaml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Apply the coredns-configmap.yaml changes
    shell: kubectl apply -f /root/rke-tf/coredns-configmap.yaml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Dump the coredns deployment to coredns-deployment.yaml
    shell: kubectl get deployment -n kube-system coredns -o yaml > /root/rke-tf/coredns-deployment.yaml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Remove the liveness and readiness probes from coredns-deployment.yaml
    shell: sed -i "/ livenessProbe:/,+9d" /root/rke-tf/coredns-deployment.yaml ; sed -i "/ readinessProbe:/,+8d" /root/rke-tf/coredns-deployment.yaml
    delegate_to: 127.0.0.1
    run_once: true
  - name: Apply the coredns-deployment.yaml changes
    shell: kubectl apply -f /root/rke-tf/coredns-deployment.yaml
    delegate_to: 127.0.0.1
    run_once: true
    ignore_errors: true
EOF
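#Optionally check the rendered playbook before running it:
#ansible-playbook -i /root/rke-tf/inventory.ini /root/rke-tf/playbook.yml --syntax-check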
#Run the playbook
ANSIBLE_COW_SELECTION=tux ansible-playbook -i /root/rke-tf/inventory.ini /root/rke-tf/playbook.yml --user root -vvv
#Deploy and expose the demo service.
kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml && kubectl expose deployment hello-world --type=LoadBalancer --name=my-service
#Wait for the demo service to come up.
watch -t '/usr/games/cowsay -ftux "Watching kubectl get pods -o wide" && kubectl get pods -o wide && /usr/games/cowsay -ftux "Watching kubectl get svc" && kubectl get svc && /usr/games/cowsay -ftux "A demo service called my-service has been deployed and exposed using --type=LoadBalancer. The service should get an EXTERNAL-IP from the external-fip network. TYPE CONTROL-C TO CONTINUE!!! This will destroy the demo service and complete the installation!"'
#Destroy the demo.
kubectl delete svc my-service && kubectl delete -f https://k8s.io/examples/service/load-balancer-example.yaml
#Run contrail-status on the first TF control plane node
/usr/games/cowsay -ftux "Running contrail-status"
ssh -l root ${tf_api_lb} contrail-status
#Finish
/usr/games/cowsay -ftux "Installation complete!
Login to the TungstenFabric WebUI at https://${tf_api_lb}:8143
User: admin Password: contrail123"