# For the 2.4 bundle launch, a few extra parameters have to be provided. The
# following commands convert the yavlab floating IP range into those 5
# parameters. Adjust FLOAT_RANGE to your setup.
YAVLAB_DEF_GW="10.32.0.1"
PUBLIC_IF="eth0"
FLOAT_RANGE="10.35.237.0/24"
inet_aton() {
    local a b c d
    { IFS=. read a b c d; } <<< "$1"
    echo $(( (a << 24) | (b << 16) | (c << 8) | d ))
}
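# The conversion below is a sketch; the original does not name the 5
# parameters, so FLOAT_BASE, PREFIX_LEN, NETMASK, FLOAT_START and FLOAT_END
# are assumptions. inet_ntoa is the inverse helper of inet_aton above.
inet_ntoa() {
    local num=$1
    echo "$(( (num >> 24) & 255 )).$(( (num >> 16) & 255 )).$(( (num >> 8) & 255 )).$(( num & 255 ))"
}
FLOAT_BASE=${FLOAT_RANGE%/*}    # network address, e.g. 10.35.237.0
PREFIX_LEN=${FLOAT_RANGE#*/}    # prefix length, e.g. 24
NETMASK=$(inet_ntoa $(( (0xffffffff << (32 - PREFIX_LEN)) & 0xffffffff )))
FLOAT_START=$(inet_ntoa $(( $(inet_aton $FLOAT_BASE) + 1 )))
FLOAT_END=$(inet_ntoa $(( $(inet_aton $FLOAT_BASE) + (1 << (32 - PREFIX_LEN)) - 2 )))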
# Install docker
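# (assumed step; the install command was missing. yum matches the CentOS
#  commands used elsewhere in this doc)
yum install -y docker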
# Stop docker services and wipe out /var/lib/docker
service docker stop
rm -rf /var/lib/docker
# Create the PVs and LVs. The following uses /dev/sdb1; add more partitions or disks here
pvcreate /dev/sdb1
# Create docker VG using the above created pvs
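# (assumed; the vgcreate command was missing and "docker" as the VG name is a guess)
vgcreate docker /dev/sdb1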
# Set this to the new size of the pool
NEW_SIZE_GB=200
NEW_SIZE=$(( $NEW_SIZE_GB*1024*1024*1024 ))
META_SIZE_GB=$(echo "$NEW_SIZE_GB*.02" | bc -l)
NEW_META_SIZE_GB=${META_SIZE_GB%.*}
POOL=$(docker info | grep -i "pool name" | awk '{print $3}')
DEVMAPPER_POOL="/dev/mapper/$POOL"
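# The resize itself is sketched below (assumed; it follows the standard
# dmsetup recipe for growing a thin pool and presumes the underlying data
# device has already been grown, e.g. via lvextend).
NEW_SECTORS=$(( NEW_SIZE / 512 ))
TABLE=$(dmsetup table "$POOL" | awk -v sz="$NEW_SECTORS" '{ $2 = sz; print }')
dmsetup suspend "$DEVMAPPER_POOL"
dmsetup reload "$DEVMAPPER_POOL" --table "$TABLE"
dmsetup resume "$DEVMAPPER_POOL"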
# Launch the container from bd_mgmt and invoke the following through a bdconfig
# command. Two parameters are needed: NEW_SIZE_GB and INSTANCE_NAME.
# docker run -d --name test bluedata/centos6
INSTANCE_NAME="test"
NEW_SIZE_GB="30"
NEW_SIZE=$(( $NEW_SIZE_GB*1024*1024*1024 ))
CONT_ID=$(docker inspect -f '{{.Id}}' $INSTANCE_NAME)
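# A sketch of growing the container's thin device (assumed; this follows the
# common dmsetup + resize2fs recipe for devicemapper-backed containers).
POOL=$(docker info | grep -i "pool name" | awk '{print $3}')
CONT_DEV="${POOL%-pool}-$CONT_ID"        # per-container dm device name
NEW_SECTORS=$(( NEW_SIZE / 512 ))
TABLE=$(dmsetup table "$CONT_DEV" | awk -v sz="$NEW_SECTORS" '{ $2 = sz; print }')
dmsetup suspend "$CONT_DEV"
dmsetup reload "$CONT_DEV" --table "$TABLE"
dmsetup resume "$CONT_DEV"
resize2fs "/dev/mapper/$CONT_DEV"        # grow the ext filesystem to the new size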
# Set the container name here
INSTANCE_NAME="bluedata-12"
# Get the actual device mapper pool used by docker
POOL=$(docker info | grep -i "pool name" | awk '{print $3}')
DEVMAPPER_POOL="/dev/mapper/$POOL"
DEV="$INSTANCE_NAME-dev"
# Get the container id of the instance
CONT_ID=$(docker inspect -f '{{.Id}}' $INSTANCE_NAME)
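# Sketch (assumed): activate the container's thin device under the name $DEV.
# The metadata path and the python one-liners are assumptions; docker's
# devicemapper metadata records each container's thin-device id and size.
META="/var/lib/docker/devicemapper/metadata/$CONT_ID"
DEV_ID=$(python -c "import json,sys; print json.load(open(sys.argv[1]))['device_id']" "$META")
SIZE=$(python -c "import json,sys; print json.load(open(sys.argv[1]))['size']" "$META")
dmsetup create "$DEV" --table "0 $(( SIZE / 512 )) thin $DEVMAPPER_POOL $DEV_ID"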
# Set the container name here
INSTANCE_NAME="bluedata-400"
# Extract the device num from the instance name
INSTANCE_DEV_ID=$(echo $INSTANCE_NAME | cut -d"-" -f2)
# device name to use for the snapshot
SNAP_NAME=$INSTANCE_NAME-snap
# To create a snapshot device, we need to specify a unique device id. This has
# to be a 24-bit number; we start from 16484 as the base.
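# Sketch (assumed), following the kernel thin-provisioning docs: suspend the
# origin, send create_snap to the pool, resume, then activate the snapshot.
# The metadata lookup mirrors the previous snippet and is an assumption.
CONT_ID=$(docker inspect -f '{{.Id}}' $INSTANCE_NAME)
META="/var/lib/docker/devicemapper/metadata/$CONT_ID"
ORIGIN_DEV_ID=$(python -c "import json,sys; print json.load(open(sys.argv[1]))['device_id']" "$META")
SIZE=$(python -c "import json,sys; print json.load(open(sys.argv[1]))['size']" "$META")
POOL=$(docker info | grep -i "pool name" | awk '{print $3}')
DEVMAPPER_POOL="/dev/mapper/$POOL"
CONT_DEV="${POOL%-pool}-$CONT_ID"
SNAP_DEV_ID=$(( 16484 + INSTANCE_DEV_ID ))
dmsetup suspend "$CONT_DEV"              # quiesce the origin before snapshotting
dmsetup message "$DEVMAPPER_POOL" 0 "create_snap $SNAP_DEV_ID $ORIGIN_DEV_ID"
dmsetup resume "$CONT_DEV"
dmsetup create "$SNAP_NAME" --table "0 $(( SIZE / 512 )) thin $DEVMAPPER_POOL $SNAP_DEV_ID"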
# Set up a keypair to access all your nodes from your Mac (see the sketch
# after the node definitions below)
# Define the master node
MASTER_NODE="10.32.1.58"
# Define all minions
MINION_NODES="10.32.1.70 10.32.1.189"
# Define the pod network
POD_NETWORK_RANGE="192.168.0.0/16"
ALL_NODES="$MASTER_NODE $MINION_NODES"
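# A minimal sketch of the keypair setup (assumed: root login and the default
# key path)
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -f ~/.ssh/id_rsa -N ""
for node in $ALL_NODES; do
    ssh-copy-id "root@$node"
done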
# Create a PV and PVC for the two directories that EPIC requires:
# /opt/bluedata/db and /opt/bluedata/catalog/bundles
# Create two PVs backed by bd-nas3
NFS_SERVER="10.2.12.27"
NFS_SHARE="/jungle/kubernetes/swami/bd-epic/opt/catalog"
cat >/tmp/bd-epic-catalog.yaml << EOF
kind: PersistentVolume
apiVersion: v1
metadata:
  name: bd-epic-catalog
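# The rest of this PV spec is an assumed completion (the original heredoc was
# truncated); the capacity and access mode are guesses, adjust to your setup.
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: $NFS_SERVER
    path: $NFS_SHARE
EOF
# (assumed) create the PV from the file above
kubectl apply -f /tmp/bd-epic-catalog.yaml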
# Launch dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
# Give the dashboard's service account full admin privileges
cat >/tmp/dashboard-admin.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
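    k8s-app: kubernetes-dashboard
# The binding below is an assumed completion of the truncated heredoc; it is
# the commonly used recipe that grants the dashboard cluster-admin.
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOF
kubectl apply -f /tmp/dashboard-admin.yaml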
# Become super user
sudo su
# Install haproxy
yum install -y haproxy
# Generate a self-signed certificate
mkdir -p /etc/ssl/certs/haproxy
openssl genrsa -out /etc/ssl/certs/haproxy/server.key 1024
openssl req -new \
    -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=localhost" \
    -key /etc/ssl/certs/haproxy/server.key \
    -out /etc/ssl/certs/haproxy/server.csr
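# The signing step below, like the -key/-out arguments above, is an assumed
# completion of the truncated script: self-sign the CSR and concatenate cert
# and key into the single pem file that haproxy expects.
openssl x509 -req -days 365 \
    -in /etc/ssl/certs/haproxy/server.csr \
    -signkey /etc/ssl/certs/haproxy/server.key \
    -out /etc/ssl/certs/haproxy/server.crt
cat /etc/ssl/certs/haproxy/server.crt /etc/ssl/certs/haproxy/server.key \
    > /etc/ssl/certs/haproxy/server.pem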