Last active
August 29, 2015 14:11
-
-
Save erincerys/83789cd18174dda1b7fc to your computer and use it in GitHub Desktop.
An rc.local script tailored for MySQL servers running on AWS EC2 i2.xlarge instances. It will create the partition table on ephemeral storage, format the disk ext4, download a data store from S3, and start the MySQL server.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/bin/bash -e
# rc.local bootstrap for MySQL servers on AWS EC2 i2.xlarge instances:
# partitions the ephemeral disk, formats it ext4, downloads the datastore
# from S3, and starts the MySQL server.
# bash (not plain sh) is required: the script uses [[ ]], arrays and "${@:2}".
# This is a draft and may not work for you.
# Dependencies:
# - aws python cli toolset
# - qpress / quicklz
# - s3-multipart.py
# Search an array for an element.
# Usage: containsElement NEEDLE ELEM... ; returns 0 if NEEDLE is among ELEMs,
# 1 otherwise (result is reported via exit status only, nothing is printed).
## http://stackoverflow.com/a/8574392/2272443
containsElement () {
  local candidate
  for candidate in "${@:2}"; do
    if [[ "$candidate" == "$1" ]]; then
      return 0
    fi
  done
  return 1
}
# Set the disk you want to partition and store data on
LOGICAL_DISK='xvdb'

# Try to create the ephemeral storage if it doesn't already exist, and isn't mounted
if [ "$(mount | grep -c /mnt)" -eq 0 ] ; then
  # If the disk hasn't been partitioned, do that
  if [ "$(parted /dev/${LOGICAL_DISK} print | grep -c primary)" -eq 0 ] ; then
    # Get the offset to align the partition by
    ## http://h10025.www1.hp.com/ewfrf/wc/document?cc=uk&lc=en&dlc=en&docname=c03479326
    # Elements are space-separated: the original comma-separated list produced a
    # single-element array, so the power-of-two membership test never matched.
    POWER_OF_TWO_LIST=(1 2 4 8 16 32 64 128 256 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304 8388608 16777216 33554432 67108864 134217728 268435456 536870912 1073741824 2147483648)
    # containsElement reports its result via exit status (0 = found) and prints
    # nothing, so it must be run directly as a command -- the original wrapped
    # it in $(...) and compared the (empty) output with -eq 1, which was always
    # false.
    if [[ $(cat /sys/block/${LOGICAL_DISK}/alignment_offset) -eq 0 ]] && containsElement "$(cat /sys/block/${LOGICAL_DISK}/queue/minimum_io_size)" "${POWER_OF_TWO_LIST[@]}" ; then
      BLOCK_OFFSET=1
    elif [[ $(cat /sys/block/${LOGICAL_DISK}/queue/optimal_io_size) -ne 0 ]] ; then
      # Align to (optimal_io_size + alignment_offset) / physical_block_size sectors
      BLOCK_OFFSET="$((($(cat /sys/block/${LOGICAL_DISK}/queue/optimal_io_size) + $(cat /sys/block/${LOGICAL_DISK}/alignment_offset))/$(cat /sys/block/${LOGICAL_DISK}/queue/physical_block_size)))s"
    elif [[ $(cat /sys/block/${LOGICAL_DISK}/queue/minimum_io_size) -ne 0 ]] ; then
      BLOCK_OFFSET="$(cat /sys/block/${LOGICAL_DISK}/queue/minimum_io_size)s"
    else
      BLOCK_OFFSET="$(cat /sys/block/${LOGICAL_DISK}/queue/physical_block_size)s"
    fi
    parted /dev/${LOGICAL_DISK} mklabel gpt
    # The 'ext2' argument is only a partition-type hint to parted; the real
    # filesystem is created by mkfs below.  '-- -1' extends to the end of disk.
    parted /dev/${LOGICAL_DISK} mkpart primary ext2 ${BLOCK_OFFSET} -- -1
    # Format the partition
    mkfs.ext4 /dev/${LOGICAL_DISK}1
  fi
  # Mount with transactional journal
  ## To be able to disable the double-write buffer in MySQL server
  mount -t ext4 -o rw,data=journal /dev/${LOGICAL_DISK}1 /mnt
fi
# Set up S3 location of where to grab the MySQL datastore from
S3_PATH='s3://../'
S3_DESTINATION='reporting-datastore.qp'
# Expected size of the archive in BYTES, for a post-download consistency check
DATASTORE_SIZE=95388761246

if [ "$(mount | grep -c /mnt)" -eq 1 ] ; then
  cd /mnt
  # create dir for ftp user (-p keeps a re-run from aborting under '-e')
  mkdir -p transfers
  chown dropbox:dropbox transfers
  # create dir for mysql dumps
  mkdir -p dumps
  chown mysql:mysql dumps
  # download and import datastore
  ## DEPENDS ON s3-multipart
  ## https://github.com/mumrah/s3-multipart
  ## 2014-12-18 Needed this fix https://github.com/mumrah/s3-multipart/issues/20
  # NOTE(review): S3_PATH already ends in '/', so this key contains a double
  # slash -- verify against the real bucket layout.
  /usr/bin/python /root/s3-multipart/s3-mp-download.py -np 2 -v ${S3_PATH}/${S3_DESTINATION} ./ > /mnt/download.log
  # Compare the downloaded file's size in bytes against the expected size.
  # The original used 'du' (1K blocks) against a byte count, so the check could
  # never succeed; it also ran an 'aws s3 ls' whose output (S3_FILESIZE) was
  # never used -- that dead call has been removed.
  if [ "$(stat -c %s ${S3_DESTINATION})" -eq ${DATASTORE_SIZE} ] ; then
    # quicklz is used for fast compression
    qpress -d -v ${S3_DESTINATION} ./ > /mnt/extract.log
    chown -R mysql:mysql mysql/
    # start mysql daemon
    /usr/sbin/service mysql start
    # remove the datastore archive
    rm ${S3_DESTINATION}
  else
    # size mismatch: abort rather than start MySQL on a corrupt datastore
    exit 1
  fi
  cd -
else
  # ephemeral storage never got mounted; nothing safe to do
  exit 1
fi

exit 0
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment