Very quick and easy Cassandra (1.2) Installation on an EC2 (or really any) Ubuntu offering.
#!/bin/bash
###
#
# Find out which ami to use on http://www.alestic.com
#
# at the time of this writing, Ubuntu 12.04 (Precise) was ami-23d9a94a
# with EBS boot
#
# In order for this to work, tune your firewall to allow the following ports:
#
# 7000-7001
# 1024 - 65535
# 7199 TCP (JMX)
# 8012 TCP
# 8983 TCP
# 9042 TCP (BINARY PROTOCOL --- CQL3)
# 9160 TCP (THRIFT PROTOCOL)
# 22
# 8443
# 8888 (OPSCENTER)
#
###
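###
# A minimal sketch of opening those ports with ufw, in case you're using ufw as
# the host firewall (on EC2 you'd normally open these in the security group
# instead). Left commented on purpose --- adapt to your own setup.
#
# sudo ufw allow 22/tcp
# sudo ufw allow 7000:7001/tcp
# sudo ufw allow 7199/tcp
# sudo ufw allow 8012/tcp
# sudo ufw allow 8443/tcp
# sudo ufw allow 8888/tcp
# sudo ufw allow 8983/tcp
# sudo ufw allow 9042/tcp
# sudo ufw allow 9160/tcp
# sudo ufw --force enable
###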
#
### If this is going to be DEV --- probably just LVM, rather than a ton of disks
# install LVM, ntp
sudo apt-get -y -qq install lvm2 ntp
# fix date
sudo /etc/init.d/ntp stop && sudo ntpdate pool.ntp.org && sudo /etc/init.d/ntp start
# AWS Ubuntu instances take one of your ephemeral disks and mount it to /mnt by default
if [ "`mount | grep xvdb`" ]; then
mntpoint=$(mount | grep xvdb | awk '{print $3}')
sudo umount $(mount | grep xvdb | awk '{print $1}')
sudo rm -rf $mntpoint
fi
if [ "`grep xvdb /etc/fstab`" ]; then
sudo sed -i -e '/xvdb/d' /etc/fstab
fi
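# sanity check (optional): xvdb should now be gone from both mounts and fstab
mount | grep xvdb || echo "OK: xvdb no longer mounted"
grep xvdb /etc/fstab || echo "OK: xvdb no longer in /etc/fstab"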
###
# Now let's find out which disk(s) we can use for LVM
#
# will end up with one (or multiple) listings such as:
# 202 32 440366080 xvdc
###
cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)"
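###
# (Alternative, if you prefer: lsblk gives a friendlier view of the same thing.
#  Just a convenience --- it assumes lsblk is available, which it is on any
#  reasonably recent Ubuntu.)
# lsblk -d -o NAME,SIZE,MOUNTPOINT
###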
###
# The loop below automates the pvcreate for each of those disks;
# COMMENT IT OUT if you'd rather pick the disks and run pvcreate by hand
###
OIFS=$IFS
IFS=$'\n'
for disk in $(cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)"); do
    # size=$(echo $disk | awk '{printf "%0.f", $(NF-1)/1024/1024}')
    # echo "CREATING PHYSICAL GROUP FOR: /dev/$(echo $disk | awk '{print $NF}') --- SIZE: ${size}GB"
    sudo pvcreate /dev/$(echo $disk | awk '{print $NF}')
done
IFS=$OIFS
# carve up the raw space: 60% of it for data, 20% for the commitlog (sizes in GB)
data_size=$(cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)" | awk '{print $(NF-1)}' | awk '{sum+=$1} END {print sum*.6/1024/1024}')
clog_size=$(cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)" | awk '{print $(NF-1)}' | awk '{sum+=$1} END {print sum*.2/1024/1024}')
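# sanity-print the computed sizes before carving up the volume group
echo "LVM sizing: data=${data_size}G commitlog=${clog_size}G (60% / 20% of raw space)"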
# Now create volume groups & logical volumes
sudo vgcreate cassandra $(for disk in $(cat /proc/partitions | grep -vP "($(df -lh | grep -oP '(s|xv)d.\d?' | tr '\n' '|' | sed -e 's/|$//')|^$|major)" | awk '{print $NF}'); do echo -n "/dev/$disk "; done)
sudo lvcreate --size ${data_size}G -n data cassandra
sudo lvcreate --size ${clog_size}G -n clog cassandra
#block_num=$(sudo lvdisplay /dev/cassandra/data | grep Block | awk '{print $NF}' | awk -F: '{print $1}')
#data_minor=$(sudo lvdisplay /dev/cassandra/data | grep Block | awk '{print $NF}' | awk -F: '{print $2}')
#clog_minor=$(sudo lvdisplay /dev/cassandra/clog | grep Block | awk '{print $NF}' | awk -F: '{print $2}')
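# quick (read-only) check that the PVs, VG and LVs all look right
sudo pvs
sudo vgs cassandra
sudo lvs cassandra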
# enable multiverse
OIFS=$IFS
IFS=$'\n'
for line in $(grep multiverse$ /etc/apt/sources.list); do
    newline=$(echo $line | sed 's/# //')
    sudo sed -i -e 's,'"$line"','"$newline"',' /etc/apt/sources.list
done
IFS=$OIFS
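# (optional) confirm the multiverse lines really are active now
grep 'multiverse$' /etc/apt/sources.list | grep -v '^#'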
sudo apt-get -qq update
### install XFS and MDADM to RAID0 ephemeral disks
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -qq install mdadm xfsprogs
# RAIIIIIIIIIIIIIID
# sudo /sbin/mdadm --create -l0 -n2 -c 256 /dev/md0 /dev/xvdc /dev/xvdd
# sudo mkdir /var/lib/cassandra
# echo "/dev/md0 /var/lib/cassandra xfs noatime,comment=raid-eph-cassandra 0 0" | sudo tee -a /etc/fstab
# echo DEVICE /dev/xvdc /dev/xvdd | sudo tee /etc/mdadm/mdadm.conf
# sudo mkfs.xfs /dev/md0
# sudo mount /var/lib/cassandra
# sudo blockdev --setra 65536 /dev/md0
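# (if you do take the RAID0 route above, /proc/mdstat is the quick way to confirm)
# cat /proc/mdstat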
# Make sure our newly made LVMs are XFS
sudo mkfs.xfs /dev/cassandra/data
sudo mkfs.xfs /dev/cassandra/clog
sudo mkdir -p /var/lib/cassandra/data
sudo mkdir -p /var/lib/cassandra/commitlog
echo "/dev/cassandra/data /var/lib/cassandra/data xfs noatime,comment=raid-eph-cassandra 0 0" | sudo tee -a /etc/fstab
echo "/dev/cassandra/clog /var/lib/cassandra/commitlog xfs noatime,comment=raid-eph-cassandra 0 0" | sudo tee -a /etc/fstab
sudo mount -a
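# confirm both logical volumes mounted where we expect them
df -h /var/lib/cassandra/data /var/lib/cassandra/commitlog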
# NOTE: the cassandra user doesn't exist yet (the dsc12 package below creates it),
# so taking ownership of /var/lib/cassandra is deferred until after that install
# allow installs of Datastax
echo "deb http://debian.datastax.com/community stable main" | sudo tee -a /etc/apt/sources.list
curl -L http://debian.datastax.com/debian/repo_key | sudo apt-key add -
# Install Opscenter
sudo apt-get -qq update
sudo apt-get -y -qq install libssl0.9.8 opscenter-free
# allow it to listen on public interface
sudo sed -i -e 's/interface = 127.0.0.1/interface = 0.0.0.0/' /etc/opscenter/opscenterd.conf
### START'ER UP!
sudo service opscenterd start
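# (optional) give opscenterd a few seconds, then confirm it answers on 8888
# (curl is assumed here; it was already used above to fetch the repo key)
sleep 5
curl -sI http://localhost:8888 | head -n 1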
###
#
# And you're just about ready to go. You can now use OpsCenter to install any
# cluster of machines you'd like: give it your EC2 credentials and it will launch
# a whole new set of machines for you, OR point it at machines you already have.
#
###
###
#
# Installing Cassandra 1.2.x on this machine
#
###
# let's install DSC12 (DataStax Cassandra 1.2)
sudo apt-get -y -qq install dsc12
sudo chown -R cassandra:cassandra /var/lib/cassandra/   # the cassandra user exists now, so fix ownership of the mounts from earlier
###
#
# Now that it's installed... Configure it just a wee bit
#
# Edit /etc/default/dse (for Hadoop set HADOOP_ENABLED=1, for Solr set SOLR_ENABLED=1); each node should take on ONE role
#
###
# There are a lot of Cassandra options, so look them up, but at a bare minimum:
sudo sed -i -e "/^rpc_address/c\rpc_address: 0.0.0.0" -e "/^initial_token/c# initial_token:" /etc/cassandra/cassandra.yaml
# start cassandra up
sudo service cassandra start
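# give it a few seconds to boot, then confirm the node reports Up/Normal (UN)
sleep 20
nodetool status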
###
# For opscenter-agent installs to work, you must have added your own ssh key
# (created with ssh-keygen -t rsa) to the authorized_keys file...
#
# ssh-keygen -t rsa
# cat ~/.ssh/id_rsa.pub | tee -a ~/.ssh/authorized_keys
####
ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ""
cat ~/.ssh/id_rsa.pub | tee -a ~/.ssh/authorized_keys
# paste this private key into OpsCenter when it asks for credentials while installing the opscenter-agent
cat ~/.ssh/id_rsa
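# (optional) quick check that key-based login to this box actually works, since
# that's what the opscenter-agent install relies on
ssh -o BatchMode=yes -o StrictHostKeyChecking=no localhost true && echo "ssh key login OK"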