Skip to content

Instantly share code, notes, and snippets.

@renoirb
Last active November 16, 2017 01:21
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 2 You must be signed in to fork a gist
  • Save renoirb/cb14d162aa5ce4e98b141f286b90f490 to your computer and use it in GitHub Desktop.
Save renoirb/cb14d162aa5ce4e98b141f286b90f490 to your computer and use it in GitHub Desktop.
Setup Pine64 5 node cluster from longsleep Ubuntu 16.04 base image
#!/bin/bash
# Bootstrap a Consul agent on a Pine64 node (Linux only, must run as root).
set -e

# Refuse to run on anything but Linux.
UNAME=$(uname)
if [ "$UNAME" != "Linux" ]; then
(>&2 echo "Not Linux. (Linux != $UNAME)")
exit 1
fi

# BUG FIX: the original tested `id -g` (effective *group* id), so any user whose
# primary group is 0 would pass. `id -u` checks the effective *user* id.
if test "$(id -u)" -ne "0"; then
(>&2 echo "You must run this as root.")
exit 1
fi

ADAPTER=eth0
# NOTE(review): this parses legacy net-tools `ifconfig` output ("inet addr:").
# Newer ifconfig prints "inet 1.2.3.4" instead, which would leave this empty —
# verify on the target image, or switch to `ip -4 addr show`.
ADAPTER_IPV4=$(ifconfig "${ADAPTER}" | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1 }')

mkdir -p /etc/consul.d/
mkdir -p /var/run/consul/
mkdir -p /var/lib/consul/

# Install the Consul binary once; skip when already present.
if ! test -f /usr/local/bin/consul; then
mkdir -p /root/consul
# Install unzip *before* downloading so a failed apt run doesn't leave a
# downloaded-but-unextractable archive. -sSL: quiet, show errors, follow redirects.
apt-get install -y unzip
curl -sSL https://releases.hashicorp.com/consul/0.7.5/consul_0.7.5_linux_arm.zip -o /root/consul/consul.zip
unzip /root/consul/consul.zip -d /root/consul
mv /root/consul/consul /usr/local/bin/
fi
# Cluster join configuration: each node binds to its own eth0 address and
# joins via the master node's fixed private IP.
(cat <<- _EOF_
{
"start_join": [
"10.1.10.240"
],
"bind_addr": "${ADAPTER_IPV4}"
}
_EOF_
) > /etc/consul.d/cluster.json

# https://www.freedesktop.org/software/systemd/man/systemd.service.html
# BUG FIX: this heredoc used to be quoted ('_EOF_'), so `nproc` in Environment=
# was written out as a literal backtick string — systemd performs no command
# substitution, so GOMAXPROCS was garbage. The heredoc is now unquoted:
# $(nproc) is expanded at install time, and the variables systemd must expand
# itself at runtime ($OPTIONS, $MAINPID) are backslash-escaped.
# Also fixed: Requires=network-online.target was paired with After=network.target;
# ordering must reference the same unit to take effect.
(cat <<- _EOF_
[Unit]
Description=Consul service discovery agent
Requires=network-online.target
After=network-online.target
[Service]
User=nobody
Group=nogroup
Restart=on-failure
Environment="GOMAXPROCS=$(nproc)"
PermissionsStartOnly=true
ExecStartPre=/usr/local/bin/consul configtest -config-dir=/etc/consul.d
ExecStart=/usr/local/bin/consul agent \$OPTIONS -config-dir=/etc/consul.d
ExecReload=/bin/kill -s HUP \$MAINPID
KillSignal=SIGINT
TimeoutStopSec=5
[Install]
WantedBy=multi-user.target
_EOF_
) > /etc/systemd/system/consul.service
# Mark this node as a bootstrap Consul server.
# NOTE(review): every node running this unmodified script gets bootstrap.json,
# yet normally only ONE node should bootstrap — confirm this file is removed
# (or this step skipped) on the other nodes.
(cat <<- _EOF_
{
"bootstrap": true,
"server": true
}
_EOF_
) > /etc/consul.d/bootstrap.json
# Settings shared by every agent on every node.
(cat <<- _EOF_
{
"data_dir": "/var/lib/consul",
"enable_debug": true,
"log_level": "info"
}
_EOF_
) > /etc/consul.d/common.json
# The unit runs as nobody:nogroup; hand it the config and state directories.
chown -R nobody:nogroup /etc/consul.d/
chown -R nobody:nogroup /var/run/consul/
chown -R nobody:nogroup /var/lib/consul/
echo 'Enabling service'
systemctl enable consul.service
systemctl start consul.service
# Typo fix: "Checing" -> "Checking". Note: under `set -e` a non-active service
# makes `systemctl status` exit non-zero and abort the script right here.
echo 'Checking with SystemD if it is up'
systemctl status consul
#!/bin/bash
# Copy the etcdctl binary out of the running etcd container onto the host.
set -e
# BUG FIX: `id -g` tests the effective *group* id; use `id -u` for a root check.
if test "$(id -u)" -ne "0"; then
(>&2 echo "You must run this as root.")
exit 1
fi
command -v docker >/dev/null 2>&1 || { echo >&2 "Docker CLI is not installed. This script will not work. Aborting."; exit 1; }
# Ask the local etcd for its version string; doubles as a liveness probe.
# NOTE(review): ETCD_VERSION is never used afterwards — kept for the probe side
# effect; confirm that is intentional.
ETCD_VERSION=$(curl -s -L http://127.0.0.1:2379/version | python -c "import sys, json; print(json.load(sys.stdin)['etcdserver'])")
ETCD_CONTAINER=$(docker ps -qs --filter name=etcd_etcd)
if [ ! -f /usr/local/bin/etcdctl ]; then
echo "Copy etcdctl to /usr/local/bin/etcdctl from running container ${ETCD_CONTAINER}"
# BUG FIX: the container id was hard-coded (9d912a7ec695) instead of using the
# id that was just looked up — the script only worked on the author's machine.
docker cp "${ETCD_CONTAINER}":/usr/local/bin/etcdctl /usr/local/bin/etcdctl
chmod +x /usr/local/bin/etcdctl
else
echo "/usr/local/bin/etcdctl already exists"
fi
#!/bin/bash
# Install and configure flanneld (CoreOS flannel overlay network) on a node.
set -e
UNAME=$(uname)
if [ "$UNAME" != "Linux" ]; then
(>&2 echo "Not Linux. (Linux != $UNAME)")
exit 1
fi
# BUG FIX: `id -g` tests the effective *group* id; use `id -u` for a root check.
if test "$(id -u)" -ne "0"; then
(>&2 echo "You must run this as root.")
exit 1
fi
#[ -z $ETCD_SERVERS ] && (>&2 echo "Required environment ETCD_SERVERS is not set."; exit 1)
#[ -z $FLANNEL_NET ] && (>&2 echo "Required environment FLANNEL_NET is not set."; exit 1)
# Assuming eth0:1 on our master k8s node has IP 10.1.10.240
CFG_PUBLIC_ADAPTER="eth0:1"
FLANNEL_PUBLIC_IP="${FLANNEL_PUBLIC_IP:-10.1.10.240}"
ETCD_SERVERS="http://${FLANNEL_PUBLIC_IP}:4001"
# Pod network CIDR handed to flannel; must match kubeadm's --pod-network-cidr.
# https://github.com/kubernetes/kubernetes/issues/43815#issuecomment-290235245
# https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# https://kubernetes.io/docs/getting-started-guides/kubeadm/#24-initializing-your-master
FLANNEL_NET="10.244.0.0/16"
# See also
# - https://github.com/kubernetes/kubernetes/blob/master/cluster/centos/master/scripts/flannel.sh
PACKAGE_SERVICE_NAME="flanneld"
PACKAGE_NAME="flannel"
BINARY_NAME="flanneld"
ARCH="arm64"
RELEASE="0.7.1"
ARCHIVE_EXT="tar.gz"
PACKAGE_REPO="https://github.com/coreos/${PACKAGE_NAME}/releases/download/"
# e.g. https://github.com/coreos/flannel/releases/download/v0.7.1/flannel-v0.7.1-linux-arm64.tar.gz
PACKAGE_URL="${PACKAGE_REPO}v${RELEASE}/${PACKAGE_NAME}-v${RELEASE}-linux-${ARCH}.${ARCHIVE_EXT}"
PACKAGE_UNPACK_PATH="/root/${PACKAGE_NAME}"
PACKAGE_BIN_DEST="/usr/local/bin/${BINARY_NAME}"
# Normally we have --strip-components=1 but package vendor build system would make the file we want not to be extracted
ARCHIVE_EXTRACT_CMD="tar xfz ${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT} -C ${PACKAGE_UNPACK_PATH}"
# Download + unpack flannel only when the binary is not installed yet.
if ! test -f "${PACKAGE_BIN_DEST}"; then
echo "Package is NOT in ${PACKAGE_BIN_DEST}, we will install"
echo "Creating ${PACKAGE_UNPACK_PATH}"
mkdir -p "${PACKAGE_UNPACK_PATH}"
if ! test -f "${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT}"; then
echo "Downloading ${PACKAGE_URL} as ${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT}"
curl -s -S -L "${PACKAGE_URL}" -o "${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT}"
fi
echo "Executing ${ARCHIVE_EXTRACT_CMD}"
# BUG FIX: this used to be $(${ARCHIVE_EXTRACT_CMD}) — run tar, then try to
# execute tar's (empty) stdout as a command. Run the stored command directly;
# the expansion stays unquoted on purpose so the string word-splits into the
# command and its arguments.
${ARCHIVE_EXTRACT_CMD}
echo "Moving ${PACKAGE_UNPACK_PATH}/${BINARY_NAME} as ${PACKAGE_BIN_DEST}"
mv "${PACKAGE_UNPACK_PATH}/${BINARY_NAME}" "${PACKAGE_BIN_DEST}"
else
echo "Package is already in ${PACKAGE_BIN_DEST}"
fi
## Remove any line that sets net.ipv4.ip_forward from the maintainer's file.
## We want to make sure it exists only once.
# BUG FIX: the sed used to substitute matching lines with nothing (s/...//),
# leaving an accumulating blank line on every run; 'd' deletes them outright.
# grep no longer uses -R: we scan one file, not a directory tree.
sed -i '/^net.ipv4.ip_forward/d' /etc/sysctl.conf
if ! grep 'ipv4.ip_forward' /etc/sysctl.conf > /dev/null; then
echo "ipv4.ip_forward line did not exist, appending into /etc/sysctl.conf"
(cat <<- _EOF_
net.ipv4.ip_forward=1
_EOF_
) >> /etc/sysctl.conf
fi
# /etc/default/flanneld — flag values consumed by the systemd unit below.
# NOTE(review): CA_FILE / CERT_FILE / KEY_FILE are never set in this script,
# so the three *FILE flags expand empty — confirm TLS is intentionally off.
(cat <<- _EOF_
FLANNEL_PUBLIC_IP="--public-ip=${FLANNEL_PUBLIC_IP}"
FLANNEL_ADAPTER="--iface=${CFG_PUBLIC_ADAPTER}"
FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=${CA_FILE}"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=${CERT_FILE}"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=${KEY_FILE}"
_EOF_
) > /etc/default/${PACKAGE_SERVICE_NAME}
# systemd unit: \${...} are escaped so systemd expands them at runtime from the
# EnvironmentFile; unescaped ${...} are filled in now, at install time.
(cat <<- _EOF_
[Unit]
Description=Flanneld overlay address etcd agent
Documentation=https://github.com/coreos/flannel
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/etc/default/${PACKAGE_SERVICE_NAME}
ExecStart=${PACKAGE_BIN_DEST} --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ADAPTER} \${FLANNEL_PUBLIC_IP} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
ExecStartPre=/sbin/modprobe ip_tables
Type=notify
[Install]
WantedBy=multi-user.target
_EOF_
) > /etc/systemd/system/${PACKAGE_SERVICE_NAME}.service
chown nobody:nogroup "${PACKAGE_BIN_DEST}"
chown nobody:nogroup "/etc/systemd/system/${PACKAGE_SERVICE_NAME}.service"
# BUG FIX: the original wrote the default network config only when
# /etc/flannel.json ALREADY existed (`test -f`), which can never create it.
# Invert the test: write the default only when the file is missing.
if ! test -f "/etc/flannel.json"; then
(cat <<- _EOF_
{
"Network": "${FLANNEL_NET}",
"Backend": {
"Type": "vxlan"
}
}
_EOF_
) > /etc/flannel.json
fi
echo 'Enabling service'
systemctl enable ${PACKAGE_SERVICE_NAME}.service
systemctl start ${PACKAGE_SERVICE_NAME}.service
# Typo fix: "Checing" -> "Checking"
echo 'Checking with SystemD if it is up'
systemctl status ${PACKAGE_SERVICE_NAME}

Manual install

As per Kubernetes.io/docs/getting-started-guides/ubuntu/manual

Add on node0

UNFINISHED!!

curl -s -S -L https://raw.githubusercontent.com/webplatform/salt-states/master/webplatform/files/screenrc.jinja -o .screenrc
curl -s -S -L https://raw.githubusercontent.com/webplatform/salt-states/master/users/files/renoirb/gitconfig -o .gitconfig

On node0

sudo apt install -y bridge-utils
salt node[1-4] grains.append roles '[kubernetes-pool]'
salt-call grains.append roles '[kubernetes-master]'
salt-call grains.append roles '[salt-master]'
salt-call -l debug pkg.install kubelet,kubeadm,kubectl,kubernetes-cni

On all nodes, add to the /etc/hosts

10.1.10.240     kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.kube.local kube.local

Make sure you have network options commented for first init, like so:

# In /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Add this line, comment the other (temporarily)
Environment="KUBELET_NETWORK_ARGS="

...

sudo vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sudo systemctl daemon-reload
sudo systemctl restart kubelet.service
sudo systemctl restart docker.service

On the Node you want the master, run

kubeadm init --apiserver-advertise-address 10.1.10.240 --pod-network-cidr 10.244.0.0/16 --apiserver-cert-extra-sans=kube.local --service-dns-domain kube.local

Wait a bit, then:

mkdir ~/.kube
sudo cp /etc/kubernetes/admin.conf ~/.kube/config
sudo chown picocluster:picocluster ~/.kube/config

If all goes well, keep record of kubeadm join (see sample below at Get cluster token)

If you see error messages like this in logs

Apr 21 05:19:40 node0 kubelet[7197]: E0421 05:19:40.637452    7197 kubelet.go:2067] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized

As per this issue:

  • Temporarily remove KUBELET_NETWORK_ARGS from /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
  • Edit your /etc/systemd/system/kubelet.service.d/10-kubeadm.conf and add the flag --cgroup-driver="systemd"
# In /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Ensure ExecStart has KUBELET_EXTRA_ARGS, and add this line before it
Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd"

FOR EACH NODES of the cluster!

You do not need to kubeadm reset on master, but rather restart

sudo systemctl daemon-reload
sudo systemctl restart kubelet.service
sudo systemctl restart docker.service

docker ps -qs --filter name=etcd_etcd
foo

export ETCDCONTAINER=foo
sudo docker cp $ETCDCONTAINER:/usr/local/bin/etcd /usr/local/bin/etcd
sudo docker cp $ETCDCONTAINER:/usr/local/bin/etcdctl /usr/local/bin/etcdctl
sudo chmod +x /usr/local/bin/etcd{,ctl}
etcdctl set /coreos.com/network/config '{"Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
sudo systemctl status flanneld
sudo systemctl restart flanneld

Get cluster token

See notes from kubeadm init command above

[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.6.0
[init] Using Authorization mode: RBAC
[preflight] Running pre-flight checks
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [node0 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.kube.local kube.local] and IPs [10.96.0.1 10.1.10.240]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 37.530146 seconds
[apiclient] Waiting for at least one node to register
[apiclient] First node has registered after 3.535083 seconds
[token] Using token: MAH.T0K33N
[apiconfig] Created RBAC rules
[addons] Created essential addon: kube-proxy
[addons] Created essential addon: kube-dns

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run (as a regular user):

  sudo cp /etc/kubernetes/admin.conf $HOME/
  sudo chown $(id -u):$(id -g) $HOME/admin.conf
  export KUBECONFIG=$HOME/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token MAH.T0K33N 10.1.10.240:6443

If you get all Ready states

kubectl get nodes

NAME      STATUS    AGE       VERSION
node0     Ready     9m        v1.6.1
node1     Ready     7m        v1.6.1
node2     Ready     7m        v1.6.1
node3     Ready     7m        v1.6.1
node4     Ready     7m        v1.6.1

You can also check pods status

kubectl get pods -o wide --all-namespaces

NAMESPACE     NAME                            READY     STATUS    RESTARTS   AGE       IP              NODE
kube-system   etcd-node0                      1/1       Running   1          8m        192.168.0.103   node0
kube-system   kube-apiserver-node0            1/1       Running   2          7m        192.168.0.103   node0
kube-system   kube-controller-manager-node0   1/1       Running   2          8m        192.168.0.103   node0
kube-system   kube-dns-2286869516-mjt3n       3/3       Running   3          8m        10.244.0.2      node0
kube-system   kube-flannel-ds-1h1tp           2/2       Running   0          1m        192.168.0.112   node2
kube-system   kube-flannel-ds-9w3r4           2/2       Running   2          7m        192.168.0.103   node0
kube-system   kube-flannel-ds-tcm7v           2/2       Running   0          1m        10.1.10.243     node3
kube-system   kube-flannel-ds-z5mz9           2/2       Running   0          1m        10.1.10.241     node1
kube-system   kube-proxy-dzcjr                1/1       Running   0          1m        192.168.0.112   node2
kube-system   kube-proxy-h68m9                1/1       Running   1          8m        192.168.0.103   node0
kube-system   kube-proxy-s8b0g                1/1       Running   0          1m        10.1.10.243     node3
kube-system   kube-proxy-t1wgm                1/1       Running   0          1m        10.1.10.241     node1
kube-system   kube-scheduler-node0            1/1       Running   1          8m        192.168.0.103   node0

##RBx

Next step, Networking layer

Networking layer

UNFINISHED

See this post and Install and run Flannel

curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml?raw=true" -o kube-flannel-rbac.yml
curl -sSL "https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel.yml?raw=true" -o kube-flannel.yml
sed -i "s/amd64/arm64/g" kube-flannel.yml

Then...

# As described on top of kube-flannel-rbac.yml
kubectl create -f kube-flannel-rbac.yml
kubectl create --namespace kube-system -f kube-flannel.yml

Then we should see

picocluster@node0:~$ kubectl get po --all-namespaces

NAMESPACE     NAME                            READY     STATUS    RESTARTS   AGE
...
kube-system   kube-proxy-1vcbd                1/1       Running   0          9m
kube-system   kube-proxy-245nz                1/1       Running   0          9m
kube-system   kube-proxy-7hsc9                1/1       Running   0          11m
kube-system   kube-proxy-dsklx                1/1       Running   0          9m
kube-system   kube-proxy-qs2vn                1/1       Running   0          9m

... AND FLANNEL, not there yet. Because CNI. TODO

Install Kubernetes dashboard

curl -sSL https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml | sed "s/amd64/arm64/g" > kubernetes-dashboard.yml
kubectl create -f - kubernetes-dashboard.yml

Delete a deployment on kube-system

kubectl delete deployment kubernetes-dashboard --namespace=kube-system
kubectl get deployment --namespace=kube-system

See also

#!/bin/bash
# Prepare a Pine64 (arm64) node to run Docker & Kubernetes: kernel modules
# for old 3.10 kernels, sysctl tuning, then packages (installed further below).
set -e
# https://blog.hypriot.com/post/getting-docker-running-on-a-high-density-armv8-server-from-hisilicon/
# https://github.com/DieterReuter/qemu-arm-box
# https://github.com/DieterReuter/arm64-docker-builder
# https://medium.com/@kurt.stam/docker-on-pine64-centos-7-arm64-b4606632f28d
# https://project31.github.io/pine64/
# https://github.com/luxas/kubernetes-on-arm/
KERNEL_RELEASE=$(uname -r)
if [[ "${KERNEL_RELEASE}" =~ ^3\.10 ]]; then
echo "Kernel 3.10"
# https://github.com/docker/docker/issues/10353
# > overlayfs is the "legacy overlay filesystem"
# > overlay is the latest
# > We support overlay.
(cat <<- _EOF_
ebt_vlan
overlayfs
nf_nat_ipv4
nf_nat_ipv6
xt_nat
nf_nat
cls_cgroup
_EOF_
) > /etc/modules-load.d/pine64_arm64_docker.conf
fi
# BUG FIX: this append used to run unconditionally, adding a duplicate
# kernel.keys.root_maxkeys line to /etc/sysctl.conf on every execution.
# Guard it so the script is idempotent.
if ! grep -q 'kernel.keys.root_maxkeys' /etc/sysctl.conf; then
(cat <<- _EOF_
kernel.keys.root_maxkeys = 1000000
_EOF_
) >> /etc/sysctl.conf
fi
apt update
echo 'Install misc requirements'
# Base tooling needed before adding third-party apt repositories.
apt install -y \
software-properties-common \
apt-transport-https \
ca-certificates \
libsqlite3-dev \
apparmor \
curl
# https://docs.docker.com/engine/installation/linux/ubuntu/#install-using-the-repository
echo 'Install Docker from repository'
# NOTE(review): despite the link above, docker.io here comes from the Ubuntu
# archive, not Docker's own repository — confirm the intended source.
apt install -y \
bridge-utils \
cgroup-lite \
btrfs-tools \
aufs-tools \
docker.io
echo 'Adding ubuntu and picocluster to docker group'
usermod -aG docker ubuntu
usermod -aG docker picocluster
# Force the systemd cgroup driver so Docker matches the kubelet's expectation.
# Patch the unit only once (grep guard); -R on a single file acts as plain grep.
if ! grep -R 'cgroupdriver' /lib/systemd/system/docker.service > /dev/null; then
sed -i 's:^ExecStart=/usr/bin/dockerd -H:ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd -H:' /lib/systemd/system/docker.service
# (cat <<- _EOF_
## UNDER SERVICE TODO
## restart the docker process if it exits prematurely
#Restart=on-failure
#StartLimitBurst=3
#StartLimitInterval=60s
#_EOF_
#) >> /lib/systemd/system/docker.service
fi
# https://github.com/luxas/kubernetes-on-arm/
echo 'Setup Kubernetes: common stuff'
# Add Google's apt repository only once (guarded on the sources.list file).
if [[ ! -f /etc/apt/sources.list.d/kubernetes.list ]]; then
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt update
fi
# kubeadm pulls in kubelet/kubectl as dependencies.
apt update && apt install -y kubeadm
# Then, on ONE of nodes, run.
# kubeadm init --apiserver-advertise-address 10.1.10.240 --pod-network-cidr 10.244.0.0/16 --apiserver-cert-extra-sans=kube.local --service-dns-domain kube.local --token-ttl 90m
# Makefile: download the longsleep Ubuntu Pine64 base image and help flash it.
# NOTE(review): the recipe lines below appear without leading tabs in this
# paste — make requires a hard tab before every recipe command; confirm the
# original file has them.
IMAGE_NAME:=xenial-pine64-bspkernel-20161218-1.img.xz
IMAGE_HREF:=https://www.stdin.xyz/downloads/people/longsleep/pine64-images/ubuntu/
# Download the compressed image into packages/ (order-only-ish dep on the dir).
packages/${IMAGE_NAME}: packages
curl -s -S -L ${IMAGE_HREF}${IMAGE_NAME} -o packages/${IMAGE_NAME}
packages:
@mkdir -p packages
.PHONY: help
# Print the upstream README plus flashing instructions (macOS diskutil/dd) and
# the per-node bootstrap commands to run after first boot.
help:
@curl -s -S -L ${IMAGE_HREF}README.txt
@echo "\nSee also:\n - http://linux-sunxi.org/UEnv.txt\n - http://www.denx.de/wiki/view/DULG/UBootEnvVariables\n - https://github.com/longsleep/build-pine64-image\n\n"
@diskutil list
@echo "\n\nThen craft the following command yourself, plz:\n xzcat packages/${IMAGE_NAME}|pv|sudo dd bs=1m of=...\n\n----"
@echo "\nOnce done, also copy:\n cp kubernetes.sh node_exporter.sh pine64.sh virtual_interface_network.sh /Volumes/BOOT\n\n----"
@echo "\nOn the new node, initialize like so:\n cd /boot ; sudo -HE bash -l"
@echo " # Imagine it is node2, we expect private IP to be 242\n NODE_NUMBER=2 NODE_FIRST_POS=240 bash virtual_interface_network.sh"
@echo " # Finish up personalizing image\n bash pine64.sh"
#!/bin/bash
# Install prometheus node_exporter as a systemd service on a Pine64 node.
set -e
UNAME=$(uname)
if [ "$UNAME" != "Linux" ]; then
(>&2 echo "Not Linux. (Linux != $UNAME)")
exit 1
fi
# BUG FIX: `id -g` tests the effective *group* id; use `id -u` for a root check.
if test "$(id -u)" -ne "0"; then
(>&2 echo "You must run this as root.")
exit 1
fi
PACKAGE_SERVICE_NAME="node-exporter"
PACKAGE_SERVICE_PORT="9100"
PACKAGE_NAME="node_exporter"
ARCH="arm64"
RELEASE="0.14.0"
ARCHIVE_EXT="tar.gz"
PACKAGE_REPO="https://github.com/prometheus/${PACKAGE_NAME}/releases/download/"
PACKAGE_URL="${PACKAGE_REPO}v${RELEASE}/${PACKAGE_NAME}-${RELEASE}.linux-${ARCH}.${ARCHIVE_EXT}"
PACKAGE_UNPACK_PATH="/root/${PACKAGE_NAME}"
PACKAGE_BIN_DEST="/usr/local/bin/${PACKAGE_NAME}"
ARCHIVE_EXTRACT_CMD="tar xfz ${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT} --strip-components=1 -C ${PACKAGE_UNPACK_PATH}"
# Download + unpack only when the binary is not installed yet.
if ! test -f "${PACKAGE_BIN_DEST}"; then
echo "Package is NOT in ${PACKAGE_BIN_DEST}, we will install"
mkdir -p "${PACKAGE_UNPACK_PATH}"
curl -s -S -L "${PACKAGE_URL}" -o "${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}.${ARCHIVE_EXT}"
# BUG FIX: this used to be $(${ARCHIVE_EXTRACT_CMD}) — run tar, then try to
# execute tar's (empty) stdout as a command. Run the stored command directly;
# the unquoted expansion is deliberate so the string word-splits.
${ARCHIVE_EXTRACT_CMD}
mv "${PACKAGE_UNPACK_PATH}/${PACKAGE_NAME}" "${PACKAGE_BIN_DEST}"
else
echo "Package is already in ${PACKAGE_BIN_DEST}"
fi
# systemd unit for the exporter; ${...} values are expanded now, at write time.
(cat <<- _EOF_
[Unit]
Description=${PACKAGE_SERVICE_NAME} service
After=local-fs.target network-online.target network.target
Wants=local-fs.target network-online.target network.target
[Service]
User=nobody
Group=nogroup
Restart=on-failure
ExecStart=${PACKAGE_BIN_DEST}
Type=simple
[Install]
WantedBy=multi-user.target
_EOF_
) > /etc/systemd/system/${PACKAGE_SERVICE_NAME}.service
chown nobody:nogroup "${PACKAGE_BIN_DEST}"
chown nobody:nogroup "/etc/systemd/system/${PACKAGE_SERVICE_NAME}.service"
# Register the exporter as a Consul service when a local agent is configured.
if test -d "/etc/consul.d/"; then
# BUG FIX: dropped the `sudo` — this script already requires root, and sudo
# may prompt (or be absent) during unattended provisioning.
echo "{\"service\": {\"name\": \"${PACKAGE_SERVICE_NAME}\", \"port\": ${PACKAGE_SERVICE_PORT}}}" | tee /etc/consul.d/${PACKAGE_SERVICE_NAME}.json
fi
echo 'Enabling service'
systemctl enable ${PACKAGE_SERVICE_NAME}.service
systemctl start ${PACKAGE_SERVICE_NAME}.service
# Typo fix: "Checing" -> "Checking"
echo 'Checking with SystemD if it is up'
systemctl status ${PACKAGE_SERVICE_NAME}
#!/bin/bash
# Personalize a fresh longsleep Pine64 Ubuntu image: locale, sudoers,
# rsyslog trimming; user/apt/rootfs steps follow further below.
set -e
export DEBIAN_FRONTEND=noninteractive
export LANG="en_US.UTF-8"
export LANGUAGE="en_US"
export LC_ALL="en_US.UTF-8"
# Only meaningful on Linux.
UNAME=$(uname)
if [ "$UNAME" != "Linux" ]; then
(>&2 echo "Not Linux. (Linux != $UNAME)")
exit 1
fi
# NOTE(review): this tests the effective *group* id; `id -u` is the usual root
# check — works for root only because root's primary group is 0.
if test "$(id -g)" -ne "0"; then
(>&2 echo "You must run this as root.")
exit 1
fi
# Guard: this script only makes sense on the longsleep base image.
if test ! -f /usr/local/sbin/resize_rootfs.sh; then
(>&2 echo "This script is designed to run on a Pine64 Linux image from longsleep and expects /usr/local/sbin/resize_rootfs.sh to exist.")
exit 1
fi
# Make the sudo group passwordless.
# NOTE(review): editing /etc/sudoers directly, without visudo syntax checking —
# a bad edit would lock out sudo; consider `visudo -c` afterwards.
sed -i '/^%sudo/ s/ALL$/NOPASSWD:ALL/' /etc/sudoers
locale-gen "${LANG}"
update-locale LANG=${LANG} LANGUAGE=${LANGUAGE} LC_ALL=${LANG}
locale
# In last 12 lines we have mention of xconsole.
# Hopefully they won't change from the bottom because we'll lose all logging
if grep -R 'xconsole' /etc/rsyslog.d/50-default.conf > /dev/null; then
# Drops the final 12 lines of the file (the xconsole block).
# Awesome sed http://stackoverflow.com/questions/13380607/how-to-use-sed-to-remove-the-last-n-lines-of-a-file#answer-13383331
sed -i -e :a -e '$d;N;2,12ba' -e 'P;D' /etc/rsyslog.d/50-default.conf
fi
# Create the cluster admin user.
# NOTE(review): `adduser` prompts interactively and fails if the user already
# exists — consider `adduser --disabled-password --gecos ''` for unattended runs.
adduser picocluster
mkdir -p /home/picocluster/.ssh
usermod -aG sudo picocluster
chown -R picocluster:picocluster /home/picocluster/.ssh
# Seed authorized_keys once with the maintainer's public keys.
if ! test -f /home/picocluster/.ssh/authorized_keys; then
(cat <<- _EOF_
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFj/NFhvDC8/jefyS1yjNtw+LV8buTsIE2zm55m9rDIv renoirb@Hairy.local
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC3yWFgjwICPb8kQdkO8OX228tGnRLzCvEV74QccCIGwZ3KvXzN9RDRdUZ7fr5sGhwx5s7WQbXkLwOtAxyAUPB1K2DJnJiK/99n4lEjR3vUZN5p7ni7LsrwuoD0A7fF3PlBILYI294xaI/nikJFP14MKgX2TZcEBfY6bVeNmIuthlimKsfpIA2KtKm56zurMjVfjPCQYmcrThs0Wa4ArlAal8IlwPcLAJrjWaFfqjJlIA+PwclXj1xbRLhALkwNmFwkTsea1oT70ydFAeWH+Ui8+bTpjtEIthDVL1BkQ8mMhbrRXa/rVFU72ENc7iY2pknKSBA0hlRRumG8gYKAAhh1 hello@renoirboulanger.com
_EOF_
) >> /home/picocluster/.ssh/authorized_keys
chown -R picocluster:picocluster /home/picocluster/.ssh
fi
# Drop the image's default "pine64" hostname alias from /etc/hosts.
sed -i '/ pine64$/d' /etc/hosts
echo "deb http://apt.armbian.com $(lsb_release -cs) main utils" | tee /etc/apt/sources.list.d/armbian.list
apt-key adv --keyserver keys.gnupg.net --recv-keys 0x93D6889F9F0E78D5
apt-get update
apt-get install -y apt-transport-https ca-certificates curl software-properties-common htop unzip parted exfat-fuse exfat-utils sunxi-tools
#apt-get install -y build-essential
#apt-get install -y dnsutils
# NOTE(review): parted is already installed by the line above — duplicate.
apt-get install -y parted
# Grow the root filesystem to fill the SD card, then refresh u-boot and kernel.
sh /usr/local/sbin/resize_rootfs.sh
partprobe
/usr/local/sbin/pine64_update_uboot.sh
/usr/local/sbin/pine64_update_kernel.sh
#
# Do the following from the first node (e.g. node0) as picocluster user:
#
# export MAX=$(cat /etc/hosts|grep cluster_member|wc -l| awk '{zero_indexed =($1 - 1)}END{print zero_indexed}')
# for((i=1;i<=$MAX;i+=1)); do /usr/bin/ssh-copy-id -i /home/picocluster/.ssh/id_rsa.pub picocluster@node${i}; done
#
# Eventually see how to differentiate if we are on first node and run the code above.
# If you have no keys, do:
#
# /usr/bin/ssh-keygen -t rsa -P '' -f /home/picocluster/.ssh/id_rsa
# sudo cat /home/picocluster/.ssh/id_rsa.pub >> /home/picocluster/.ssh/authorized_keys
#
#!/bin/bash
# Install SaltStack via the official bootstrap script; node0 becomes the master.
set -e
# NOTE(review): this key URL points at the amd64 repo while the boards are
# arm64 — the GPG key itself is likely arch-independent, but confirm the repo
# salt-bootstrap ends up using matches the architecture.
curl -s -S -L https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
curl -s -S -L https://bootstrap.saltstack.com -o /usr/local/sbin/salt-bootstrap && chmod +x /usr/local/sbin/salt-bootstrap
## REMOVE SALT to Re-Install
# sudo service salt-master stop
# sudo service salt-minion stop
# sudo rm -rf /var/log/salt /var/cache/salt /usr/share/man/man1/salt-* /usr/lib/python2.7/dist-packages/salt /etc/salt /usr/bin/salt*
# sudo rm -rf /lib/systemd/system/salt-api.service /lib/systemd/system/salt-master.service /lib/systemd/system/salt-minion.service /usr/lib/python2.7/dist-packages/salt-2016.11* /etc/sysctl.d/99-salt.conf /etc/systemd/system/multi-user.target.wants/salt-master.service /etc/systemd/system/multi-user.target.wants/salt-minion.service
if [ "$(hostname)" = "node0" ]; then
# On master
# -S for salt-syndic (i.e. make another machine make calls to node0, to control the cluster)
# -M for salt-master (e.g. control node0... cluster nodes)
# Quoted "$(hostname)" so an unexpected hostname with spaces cannot word-split.
salt-bootstrap -q -i "$(hostname)" -A node0 -r -P -M git 2016.11
mkdir -p /etc/salt/master.d
else
salt-bootstrap -q -i "$(hostname)" -A node0 -r -P git 2016.11
fi
## Samples, not required
# salt-call --local hosts.set_host 127.0.1.1 $(hostname)
## https://docs.saltstack.com/en/latest/ref/modules/all/salt.modules.file.html#salt.modules.file.manage_file
# salt-call --local file.manage_file /etc/salt/minion.d/master.conf '' '{}' None '{}' root root '644' base '' contents="master: node0"
#!/bin/bash
# Write a minimal ~/.vimrc: 4-space indentation, 80-column marker,
# always-visible status line, no line wrapping, highlighted cursor line.
printf '%s\n' \
'set tabstop=4' \
'set shiftwidth=4' \
'set expandtab' \
'set softtabstop=4' \
'set colorcolumn=80' \
'set laststatus=2' \
'set nowrap' \
'set cursorline' \
> ~/.vimrc
#!/bin/bash
# Configure a virtual eth0:1 interface and /etc/hosts entries for one cluster
# node. Tunables, all overridable through the environment:
NODE_NAME_PREFIX="${NODE_NAME_PREFIX:-node}"
IPV4_INTERNAL_PREFIX="${IPV4_INTERNAL_PREFIX:-10.1.10.}"
IPV4_INTERNAL_NETMASK=${IPV4_INTERNAL_NETMASK:-255.255.255.0}
NODE_FIRST_POS=${NODE_FIRST_POS:-10}
# Print all arguments, joined by spaces, to stderr.
# BUG FIX: the message used to be passed as the printf FORMAT string
# (printf "$@\n"), so any '%' in it would be interpreted and multiple
# arguments were mangled. Use a fixed format and join the arguments.
function stderr { printf '%s\n' "$*" >&2; }
trap "stderr 'Timeout caught.' && exit 1" SIGTERM
set -e -o pipefail
# Return 0 when the sole argument is a non-empty string of digits, 1 otherwise.
# BUG FIX: the regex used to be stored in a GLOBAL variable named `is_number`
# (same name as the function), leaking into and possibly clobbering the
# caller's scope on every call. It is now a local.
function is_number(){
local digits_re='^[0-9]+$'
if [[ ! $1 =~ $digits_re ]]; then
return 1
else
return 0
fi
}
#echo 'Tests is_number'
#if is_number a; then echo 'FAIL: "a" should not pass'; else printf '.'; fi
#if is_number 'a'; then echo 'FAIL: "a" passed as string should not pass'; else printf '.'; fi
#if is_number '1'; then printf '.'; else echo 'FAIL: Number as string "1" should pass'; fi
#if is_number 1; then printf '.'; else echo 'FAIL: Number 1 should pass'; fi
#echo
# Return 0 when $1 is a number in [0, 244], 1 otherwise (including empty or
# non-numeric input).
# BUG FIX: the original smuggled "> /dev/null 2>&1" INSIDE the [ ... ] test to
# hide errors from non-numeric input, and used the deprecated -a operator.
# Validate the input shape first, then compare numerically.
function between_zero_and_fe(){
local digits_re='^[0-9]+$'
if [[ ! $1 =~ $digits_re ]]; then
## Not a non-negative integer at all
return 1
fi
if [ "$1" -ge 0 ] && [ "$1" -le 244 ]; then
## Input is within the 0..244 range
return 0
fi
## Input DOES NOT match range
return 1
}
#echo 'Tests between_zero_and_fe'
#if between_zero_and_fe '' > /dev/null 2>&1; then echo 'FAIL: Empty input "" should not pass. We expect a digit.'; else printf '.'; fi
#if between_zero_and_fe 'a' > /dev/null 2>&1; then echo 'FAIL: "a" should not pass. We expect a digit.'; else printf '.'; fi
#if between_zero_and_fe foo > /dev/null 2>&1; then echo 'FAIL: "foo" should not pass. We expect a digit.'; else printf '.'; fi
#if between_zero_and_fe 999 > /dev/null 2>&1; then echo 'FAIL: Number 999 should not pass. Although it is a digit, it is higher than expected'; else printf '.'; fi
#if between_zero_and_fe "999" > /dev/null 2>&1; then echo 'FAIL: Number as string "999" should not pass. Although it is a digit, it is higher than expected'; else printf '.'; fi
#if between_zero_and_fe 99 > /dev/null 2>&1; then printf '.'; else echo 'FAIL: Number 99 SHOULD pass, it is within 0..244 range'; fi
#if between_zero_and_fe "5" > /dev/null 2>&1; then printf '.'; else echo 'FAIL: Number as string "5" SHOULD pass, it is within 0..244 range'; fi
#echo
# Return 0 when $1 (current node number) is strictly lower than $2 (cluster
# size), and $2 does not exceed 244. Both inputs MUST be numbers.
# BUG FIX: replaced the deprecated `[ ... -a ... ]` conjunction with
# `[[ ... && ... ]]` and collapsed the nested equality check into one strict
# `-lt` comparison (identical truth table); fixed the "MUTS" comment typo.
function between_current_and_fe(){
if ! is_number "$1"; then
return 1
fi
if ! is_number "$2"; then
return 1
fi
if [[ "$1" -lt "$2" && "$2" -le 244 ]]; then
return 0
fi
return 1
}
#echo 'Tests between_current_and_fe'
#if between_current_and_fe 5 1; then echo 'FAIL: This should fail because Number 5 is higher than maximum of a set of 1 '; else printf '.'; fi
#if between_current_and_fe '5' '1'; then echo 'FAIL: This should fail because Number passed as string "5" is higher than said maximum of a set of "1"'; else printf '.'; fi
#if between_current_and_fe 1 5; then printf '.'; else echo 'FAIL: This should pass because we can have member Number 1 out of 5 others'; fi
#if between_current_and_fe "1" '5'; then printf '.'; else echo 'FAIL: This should pass because we can have a member Number "1" (passed as string) out of "5" others (notice 5 passed as string)'; fi
#if between_current_and_fe 1 1; then printf "\nFAIL: Current (1) is not zero indexed, but second argument (1) is. In a 1 node cluster, we would only have one node called node0.\n"; else printf '.'; fi
#if between_current_and_fe foo 1; then echo 'FAIL: This should fail because first argument was "foo" is not a number and we expect only digits'; else printf '.'; fi
#if between_current_and_fe 1 bar; then echo 'FAIL: This should fail because second argument was "bar" is not a number and we expect only digits'; else printf '.'; fi
#if between_current_and_fe foo bar; then echo 'FAIL: This should fail because both arguments are strings and we expect only digits'; else printf '.'; fi
#echo
# Return 0 when $1 is a valid node DNS label: a lowercase letter followed by
# one or more lowercase letters or digits (so at least two characters, no
# leading digit, no underscores).
# BUG FIX: the regex used to be stored in the global `dns_name_regex`,
# leaking into the caller's scope on every call. It is now a local.
function is_valid_dns_name() {
local label_re='^[a-z][a-z0-9]+$'
if [[ $1 =~ $label_re ]]; then
return 0
fi
return 1
}
#echo 'Tests is_valid_dns_name'
#if is_valid_dns_name 'node1'; then printf '.'; else echo 'FAIL: "node1" is a valid DNS name'; fi
#if is_valid_dns_name 'node2'; then printf '.'; else echo 'FAIL: "node2" is a valid DNS name'; fi
#if is_valid_dns_name 'n0de'; then printf '.'; else echo 'FAIL: "n0de" is a valid DNS name'; fi
#if is_valid_dns_name '1node'; then echo 'FAIL: "1node" is invalid, DNS names MUST NOT start by digits'; else printf '.'; fi
#if is_valid_dns_name 'node_234'; then echo 'FAIL: "node_234" is invalid, DNS names CANNOT contain underscore'; else printf '.'; fi
#if is_valid_dns_name 234; then echo 'FAIL: "234" is invalid, DNS names CANNOT contain only digits'; else printf '.'; fi
#echo
# NOTE(review): tests the effective *group* id; `id -u` is the usual root
# check — works for root only because root's primary group is 0.
if test "$(id -g)" -ne "0"; then
stderr 'You must run this as root.'
exit 1
fi
# NODE_FIRST_POS is the IP offset of node0 (e.g. 240 -> node0 = .240).
if ! is_number ${NODE_FIRST_POS}; then
stderr "NODE_FIRST_POS environment MUST be a number, \"${NODE_FIRST_POS}\" is invalid."
exit 1
fi
###
## Validate a number for a node number
## Must be between 0 and 244
## e.g. node1 will have private IP 10.10.0.11
# Clear NODE_NUMBER when the environment supplied something non-numeric or
# out of range, so the prompt loop below asks for it instead.
if ! is_number $NODE_NUMBER; then
NODE_NUMBER=""
fi
if ! between_zero_and_fe $NODE_NUMBER; then
NODE_NUMBER=""
fi
# Keep prompting until a number within [0, 244] is entered.
while ! is_number $NODE_NUMBER; do
read -p "What is the node number you want this to be? " NODE_NUMBER
if between_zero_and_fe $NODE_NUMBER; then
NODE_NUMBER=$NODE_NUMBER
else
stderr "Input \"$NODE_NUMBER\" is invalid, it MUST be a number and under 244"
NODE_NUMBER=""
fi
done
###
###
## Validate how many members of the cluster we will have.
## We want to know how many we will have so we know how many
## /etc/hosts entries to make
##
## Catch situation where we provide a number as environment
## but is an invalid type.
if ! is_number $NODE_COUNT_MAX; then
NODE_COUNT_MAX=""
fi
# Give up after MAX_ITER_COUNTER_CURRENT_AND_MAX failed prompt attempts.
COUNTER_CURRENT_AND_MAX=0
MAX_ITER_COUNTER_CURRENT_AND_MAX=3
while ! is_number $NODE_COUNT_MAX; do
read -p "How many nodes will we have? " NODE_COUNT_MAX
# Reject a count that is not strictly greater than this node's number.
if ! between_current_and_fe $NODE_NUMBER $NODE_COUNT_MAX; then
if [[ ${NODE_NUMBER} -eq ${NODE_COUNT_MAX} ]]; then
# Off-by-one hint: node numbers are zero-indexed, the count is not.
NODE_COUNT_MAX_TIP=$(printf %d $((${NODE_COUNT_MAX} + 1)))
stderr "Is this the last node of a cluster? If so, maybe you mean \"${NODE_COUNT_MAX_TIP}\"?"
else
stderr "Maximum node number \"${NODE_COUNT_MAX}\" cannot be lower than \"${NODE_NUMBER}\""
fi
NODE_COUNT_MAX=""
else
break;
fi
if [[ $COUNTER_CURRENT_AND_MAX -gt 0 ]]; then
printf "(${COUNTER_CURRENT_AND_MAX}/${MAX_ITER_COUNTER_CURRENT_AND_MAX}) "
if [[ ${COUNTER_CURRENT_AND_MAX} -ge ${MAX_ITER_COUNTER_CURRENT_AND_MAX} ]]; then
stderr "Maximum iterations reached. Aborting"
exit 1
fi
fi
COUNTER_CURRENT_AND_MAX=$(printf %d $((${COUNTER_CURRENT_AND_MAX} + 1)))
done
# Convert the human count into the highest zero-indexed node number.
NODE_COUNT_MAX=$(printf %d $((${NODE_COUNT_MAX} - 1)))
###
# Derive this node's /etc/hosts position, private IP and DNS name.
NODE_POS=$(printf %d $((${NODE_NUMBER} + ${NODE_FIRST_POS})))
IPV4_INTERNAL="${IPV4_INTERNAL_PREFIX}${NODE_POS}"
NODE_NAME="${NODE_NAME_PREFIX}${NODE_NUMBER}"
NODE_COUNT_MAX_HUMAN=$(printf %d $((${NODE_COUNT_MAX} + 1)))
echo "# This is the ${NODE_NUMBER}th node of a ${NODE_COUNT_MAX_HUMAN} total nodes cluster, it will be known as ${NODE_NAME} with IPv4 private IP ${IPV4_INTERNAL}"
# NOTE(review): NODE_NAME was just assigned a non-empty value above, so this
# fallback prompt looks unreachable — confirm whether it is dead code.
if test -z ${NODE_NAME}; then
read -p 'What is the name you want to give to this node?: ' NODE_NAME
# Thanks http://stackoverflow.com/questions/5284147/validating-ipv4-addresses-with-regexp
regex='^[a-z][a-z0-9]+$'
if [[ ! $NODE_NAME =~ $regex ]]; then
stderr "${NODE_NAME} would not be a valid DNS node name, try with something else"
exit 1
fi
fi
# NOTE(review): same as above — IPV4_INTERNAL is always non-empty here.
if test -z ${IPV4_INTERNAL}; then
read -a IPV4_INTERNAL -p 'What is the IPv4 address you want to give on the private network?: '
# Thanks http://stackoverflow.com/questions/5284147/validating-ipv4-addresses-with-regexp
regex='^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'
if [[ ! $IPV4_INTERNAL =~ $regex ]]; then
stderr "${IPV4_INTERNAL} is NOT a valid IPv4 address, try with something else."
exit 1
fi
fi
# Apply: hostname, eth0:1 alias interface, then cluster /etc/hosts entries.
hostnamectl set-hostname ${NODE_NAME}
# Make the eth0 stanza also bring up the eth0:1 alias on boot.
sed -i '/^auto eth0/ s/eth0$/eth0 eth0:1/' /etc/network/interfaces.d/eth0
#sed -i '/^iface eth0 inet dhcp/ s/eth0 inet dhcp$/eth0:1 inet dhcp/' /etc/network/interfaces.d/eth0
# NOTE(review): appended on every run — repeated executions will duplicate
# this stanza; confirm the script is meant to be run once per image.
(cat <<- _EOF_
iface eth0:1 inet static
address ${IPV4_INTERNAL}
netmask ${IPV4_INTERNAL_NETMASK}
_EOF_
) >> /etc/network/interfaces.d/eth0
ifup eth0:1
# Build one /etc/hosts line per cluster member; this node maps to 127.0.1.1
# and is tagged "self".
LIST=""
for i in `seq 0 $NODE_COUNT_MAX`; do
if [[ ! "${NODE_NUMBER}" = "${i}" ]]; then
NODE_POS=$(printf %d $((${i} + ${NODE_FIRST_POS})))
IP="${IPV4_INTERNAL_PREFIX}${NODE_POS}"
APPEND_CLUSTER_MEMBER=""
else
IP="127.0.1.1"
APPEND_CLUSTER_MEMBER=" self"
fi
LIST+="${IP}\t${NODE_NAME_PREFIX}${i}\t# cluster_member ${APPEND_CLUSTER_MEMBER}\n"
done
# NOTE(review): ${i} here deliberately relies on the loop variable leaking out
# of the for-loop (it holds the LAST node's index) to test whether the block
# was already appended — fragile, verify before refactoring.
grep -q -e "${NODE_NAME_PREFIX}${i}" /etc/hosts || printf $"\n##########\n${LIST}##########\n" >> /etc/hosts
echo Done!
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment