CCM Installation to have the stack work FAST
#!/bin/bash
###
#
# Find out which ami to use on http://www.alestic.com
#
# at the time of this writing, Ubuntu 12.04 (Precise) was ami-23d9a94a
# with EBS boot
#
# In order for this to work, tune your firewall to allow the following ports:
#
# 7000-7001 TCP (internode communication)
# 1024-65535 TCP
# 7199 TCP (JMX)
# 8012 TCP
# 8983 TCP
# 9042 TCP (BINARY PROTOCOL --- CQL3)
# 9160 TCP (THRIFT PROTOCOL)
# 22
# 8443
# 8888 (OPSCENTER)
#
###
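###
# If you manage the firewall on the instance itself rather than through an
# EC2 security group, a rough (commented-out) sketch with ufw --- adjust to
# your own setup:
#
# sudo ufw allow 22/tcp
# sudo ufw allow 7000:7001/tcp
# sudo ufw allow 7199/tcp
# sudo ufw allow 8012/tcp
# sudo ufw allow 8443/tcp
# sudo ufw allow 8888/tcp
# sudo ufw allow 8983/tcp
# sudo ufw allow 9042/tcp
# sudo ufw allow 9160/tcp
# sudo ufw enable
###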
#
### NUMBER OF NODES YOU'LL WANT
NUM_NODES=3
### If this is going to be DEV --- probably just LVM, rather than a ton of disks
# install LVM, ntp
sudo apt-get -y -qq install lvm2 git ntp
# fix date
sudo /etc/init.d/ntp stop && sudo ntpdate pool.ntp.org && sudo /etc/init.d/ntp start
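# (optional) confirm the clock is actually syncing before moving on:
# ntpq -p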
# AWS Ubuntu instances take one of your ephemeral disks and mount it to /mnt by default
if [ "`mount | grep xvdb`" ]; then
mntpoint=$(mount | grep xvdb | awk '{print $3}')
sudo umount $(mount | grep xvdb | awk '{print $1}')
sudo rm -rf $mntpoint
fi
if [ "`grep xvdb /etc/fstab`" ]; then
sudo sed -i -e '/xvdb/d' /etc/fstab
fi
###
# Now let's find out which disk(s) we can use for LVM ---
# i.e. everything in /proc/partitions that isn't already mounted (per df)
#
# will end up with one (or more) listings such as:
# 202 32 440366080 xvdc
###
# helper used throughout below: list the partitions not yet in use
unused_disks() {
    cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)"
}
unused_disks
###
# the loop below automates the pvcreate step for each of those disks
###
OIFS=$IFS
IFS=$'\n'
for disk in $(unused_disks); do
    size=$(echo $disk | awk '{printf "%0.f", $(NF-1)/1024/1024}')
    echo "CREATING PHYSICAL VOLUME FOR: /dev/$(echo $disk | awk '{print $NF}') --- SIZE: ${size}GB"
    sudo pvcreate /dev/$(echo $disk | awk '{print $NF}')
done
IFS=$OIFS
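# (optional) sanity check --- the new physical volumes should show up here:
# sudo pvs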
# per-node sizes: 60% of the total space for data, 30% for commit logs (KB -> GB)
data_size=$(unused_disks | awk '{print $(NF-1)}' | awk '{sum+=$1} END {print sum*.6/'${NUM_NODES}'/1024/1024}')
clog_size=$(unused_disks | awk '{print $(NF-1)}' | awk '{sum+=$1} END {print sum*.3/'${NUM_NODES}'/1024/1024}')
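# e.g., with the single 440366080 KB disk from the example above and
# NUM_NODES=3, that works out to data_size ~= 84GB and clog_size ~= 42GB per node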
# Now create the volume group & logical volumes
sudo vgcreate cassandra $(for disk in $(unused_disks | awk '{print $NF}'); do echo -n "/dev/$disk "; done)
for i in $(seq -w $NUM_NODES); do
    sudo lvcreate --size ${data_size}G -n data$i cassandra
    sudo lvcreate --size ${clog_size}G -n clog$i cassandra
    #data${i}_minor=$(sudo lvdisplay /dev/cassandra/data$i | grep Block | awk '{print $NF}' | awk -F: '{print $2}')
    #clog${i}_minor=$(sudo lvdisplay /dev/cassandra/clog$i | grep Block | awk '{print $NF}' | awk -F: '{print $2}')
done
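# (optional) verify the logical volumes and their sizes:
# sudo lvs cassandra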
#block_num=$(sudo lvdisplay /dev/cassandra/data | grep Block | awk '{print $NF}' | awk -F: '{print $1}')
# enable multiverse
OIFS=$IFS
IFS=$'\n'
for line in $(grep multiverse$ /etc/apt/sources.list); do
    newline=$(echo $line | sed 's/# //')
    sudo sed -i -e 's,'"$line"','"$newline"',' /etc/apt/sources.list
done
IFS=$OIFS
sudo apt-get -qq update
### install mdadm and xfsprogs (only needed for the optional ephemeral-disk RAID0 below) plus pip
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -qq install mdadm xfsprogs python-pip
sudo pip install cql PyYAML
# RAIIIIIIIIIIIIIID
# sudo /sbin/mdadm --create -l0 -n2 -c 256 /dev/md0 /dev/xvdc /dev/xvdd
# sudo mkdir /var/lib/cassandra
# echo "/dev/md0 /var/lib/cassandra xfs noatime,comment=raid-eph-cassandra 0 0" | sudo tee -a /etc/fstab
# echo DEVICE /dev/xvdb /dev/xvdc | sudo tee /etc/mdadm/mdadm.conf
# sudo mkfs.xfs /dev/md0
# sudo mount /var/lib/cassandra
# sudo blockdev --setra 65536 /dev/md0
# Format our newly made LVs as ext4 and mount them over each node's data
# and commitlog directories via fstab
for i in $(seq -w 1 $NUM_NODES); do
    sudo mkfs.ext4 /dev/cassandra/data$i
    sudo mkfs.ext4 /dev/cassandra/clog$i
    #sudo mkdir -p /home/ubuntu/.ccm/My_DEV_Cluster/node${i}/data
    #sudo mkdir -p /home/ubuntu/.ccm/My_DEV_Cluster/node${i}/commitlogs
    echo "/dev/cassandra/data$i /home/ubuntu/.ccm/My_DEV_Cluster/node${i}/data auto rw,noatime,nouser_xattr,barrier=0,data=ordered,errors=remount-ro,comment=raid-eph-cassandra-data 0 1" | sudo tee -a /etc/fstab
    echo "/dev/cassandra/clog$i /home/ubuntu/.ccm/My_DEV_Cluster/node${i}/commitlogs auto rw,noatime,nouser_xattr,barrier=0,data=ordered,errors=remount-ro,comment=raid-eph-cassandra-log 0 1" | sudo tee -a /etc/fstab
done
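# (optional) eyeball the generated entries before mounting anything:
# grep cassandra /etc/fstab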
# add the DataStax community repo so we can install OpsCenter
echo "deb http://debian.datastax.com/community stable main" | sudo tee -a /etc/apt/sources.list
curl -L http://debian.datastax.com/debian/repo_key | sudo apt-key add -
# Install Opscenter
sudo apt-get -qq update
sudo apt-get -y -qq install libssl0.9.8 opscenter-free
# allow it to listen on public interface
sudo sed -i -e 's/interface = 127.0.0.1/interface = 0.0.0.0/' /etc/opscenter/opscenterd.conf
### START'ER UP!
sudo service opscenterd start
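# (optional) confirm opscenterd is up and listening on 8888:
# sudo netstat -tlnp | grep 8888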
###
#
# And you're just about ready to go. You can now use OpsCenter to install any
# cluster of machines you'd like: give it your EC2 credentials and it will
# launch a whole new set of machines for you, OR you can point it at machines
# that already exist.
#
###
###
#
# Installing CCM from git clone https://github.com/pcmanus/ccm.git
#
###
#sudo apt-add-repository -y ppa:webupd8team/java
#sudo apt-get -qq update
#sudo apt-get -y -qq install default-jdk
#sudo apt-get -y -qq install ant
sudo add-apt-repository -y ppa:webupd8team/java
sudo apt-get -qq update
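# (optional) pre-accept the Oracle license so the installer doesn't block on
# an interactive prompt --- assumes the webupd8 installer's debconf key:
# echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections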
sudo apt-get install -y -qq oracle-java6-installer
# pick the Oracle JDK when prompted (this step is interactive)
sudo update-alternatives --config java
sudo apt-get -y install ant
cd ~
git clone https://github.com/pcmanus/ccm.git
cd ccm
sudo ./setup.py install
cd ..
# finally install the version you'd like --- going with 1.2.8 now
ccm create My_DEV_Cluster -v 1.2.8
# populate creates node1..node$NUM_NODES under ~/.ccm/My_DEV_Cluster,
# each bound to its own loopback address (127.0.0.1, 127.0.0.2, ...)
ccm populate -n $NUM_NODES
# mount the new LVs right over the node directories ccm just created
sudo mount -a
sudo chown -R ubuntu:ubuntu /home/ubuntu/.ccm/My_DEV_Cluster/node*
# NETWORKING (loopback aliases)
#for i in $(seq -w 2 $NUM_NODES); do
# sudo ifconfig lo:$i 127.0.0.$i netmask 255.0.0.0 up
#done
ccm start
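# (optional) every node should report itself as UP:
# ccm status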
###
# For opscenter-agent installs to work, you must have your own ssh key
# installed in the authorized_keys file. Create one with ssh-keygen -t rsa
# and append the public half:
#
# ssh-keygen -t rsa
# cat ~/.ssh/id_rsa.pub | tee -a ~/.ssh/authorized_keys
####
ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""
cat ~/.ssh/id_rsa.pub | tee -a ~/.ssh/authorized_keys
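# (optional) quick check that key-based login to this box now works:
# ssh -o StrictHostKeyChecking=no 127.0.0.1 true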
# paste this private key when installing opscenter-agent through OpsCenter
cat ~/.ssh/id_rsa