Back up the attachment cache and MySQL schema of a Kayako helpdesk deployment to an AWS S3 bucket, but only if anything has changed since the previous backup
#!/bin/bash
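
# Backs up the Kayako attachment cache and MySQL schema to S3, skipping each
# step when nothing has changed since the previous run. Meant to run on a
# schedule -- e.g. a daily cron entry such as (path is illustrative; point it
# at wherever you save this script):
#   0 2 * * * /bin/bash /root/kayako-backup.sh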
storagedir='/root/backups'
backupdir='/var/www/__swift/files'
s3path='live-kayako-caches/backups/'
s3cpath="$(which python) /root/s3cmd-1.5.0-beta1/s3cmd"
if [ ! -d ${storagedir} ] ; then mkdir ${storagedir} ; fi
cd ${storagedir}
curdate=$(date -u '+%Y%m%d')
prefix='kayako'
archivename="${prefix}-files-${curdate}.tgz"
archivepath="${storagedir}/${archivename}"
mysqlhost=''
mysqluser=''
mysqlpass=''
mysqlschema='kayako'
# Logfile generated (not uploaded to S3)
logfile="${storagedir}/${prefix}-${curdate}.log"
date > $logfile
# Assume it will all go well
postcode=200
failcode=0
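# Status convention: postcode 200 = success, 300 = warning, 400 = error.
# failcode adds 100 for a failed attachment upload and 200 for a failed DB
# upload (so 300 = both); the timing check at the end may overwrite it with
# 50 (slow run) or 400 (very slow run)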
# Start timer
total_start_time=$(date +%s)
# Generate an md5sum of the attachments directory listing
dirmd5=$(ls -la ${backupdir} | md5sum | cut -d ' ' -f 1)
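# Note: this hashes the ls output (names, sizes, mtimes), not file contents,
# so it detects changes cheaply without reading every attachment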
# Get the latest hash of a ticket action from the db (serving as an md5 of its present state)
dbhash=$(mysql -B --skip-column-names -u${mysqluser} -p${mysqlpass} -h${mysqlhost} -e"SELECT actionhash FROM swticketauditlogs ORDER BY ticketauditlogid DESC LIMIT 1;" ${mysqlschema})
# Delete the oldest archive in S3 once there are more than 7
function deleteremotes () {
  curremotebackups=($(${s3cpath} ls s3://${s3path} | grep "$pattern" | sort | awk '{print $4}' | cut -d '/' -f 5))
  if [ ${#curremotebackups[@]} -ge 8 ] ; then
    oldremotebackups=$(echo ${curremotebackups[0]} | sed 's/\(tgz\|sql\.gz\)$/*/')
    echo "[+] Deleting oldest backup archive in S3..." >> $logfile
    ${s3cpath} del s3://${s3path}${oldremotebackups}
    # echo ${oldremotebackups}
  fi
}
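# Note: deleteremotes turns the oldest name into a glob ("...tgz"/"...sql.gz"
# -> "...*") so the matching .md5/.hash sidecar is removed along with the archive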
# Delete the two oldest archives locally in ${storagedir} once 8 or more accumulate
function deletelocals () {
  curlocalbackups=($(ls -1 | grep "$pattern" | sort))
  if [ ${#curlocalbackups[@]} -ge 8 ] ; then
    oldlocalbackups=$(echo ${curlocalbackups[@]} | tr " " "\n" | head -n 2)
    echo "[+] Deleting oldest backup archive(s) locally..." >> $logfile
    rm ${oldlocalbackups}
    # echo ${oldlocalbackups}
  fi
}
# Compare archive to previous -- do not upload this one if hash comparison shows that they are identical
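# (ls sorts the date-stamped names ascending, tac reverses to newest-first,
# and awk grabs line 2, i.e. the second-newest archive's basename, whose
# .md5 sidecar is read below)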
lastarchive=$(ls -1 ${prefix}-files-*.tgz | tac | head -n2 | awk 'NR == 2 { print }' | sed 's/\.tgz$//')
if [[ ! -e "${storagedir}/${lastarchive}.md5" ]] || [[ "$(cat ${storagedir}/${lastarchive}.md5)" != "${dirmd5}" ]] ; then
  echo "[!] Processing attachment backups..." >> $logfile
  echo "[+] Archiving..." >> $logfile
  tar cfz ${archivename} ${backupdir}
  echo "[+] Generating md5..." >> $logfile
  echo $dirmd5 > "${prefix}-files-${curdate}.md5"
  echo "[+] Uploading to S3..." >> $logfile
  result=$($s3cpath put ${prefix}-files-${curdate}.* s3://${s3path} 2>&1)
  # echo $result
  # s3cmd 1.5.0-beta1 does content-digest checksum validation by default
  if [ $(echo "$result" | grep -Pc "(MD5\ Sums\ don\'t\ match|Too\ many\ failures\.\ Giving\ up)") -eq 1 ] ; then
    failcode=$(($failcode + 100))
    postcode=400
    echo "$result" >> $logfile
  fi
  pattern="${prefix}-files-.*\.tgz"
  deleteremotes
  deletelocals
else
  echo "[!] Won't back up attachments as nothing has changed..." >> $logfile
fi
# Do the same thing for the database dump... I am lazy
lastdbdump=$(ls -1 ${prefix}-db-*.sql.gz | tac | head -n2 | awk 'NR == 2 { print }' | sed 's/\.sql\.gz$//')
if [[ ! -e "${storagedir}/${lastdbdump}.hash" ]] || [[ "$(cat ${storagedir}/${lastdbdump}.hash)" != "${dbhash}" ]] ; then
  echo "[!] Processing database backup..." >> $logfile
  echo "[+] Dumping..." >> $logfile
  mysqldump -u${mysqluser} -p${mysqlpass} -h${mysqlhost} ${mysqlschema} \
    --opt --skip-add-locks --events --skip-lock-tables --routines | gzip > ${prefix}-db-${curdate}.sql.gz
  echo "[+] Generating hash..." >> $logfile
  echo $dbhash > "${prefix}-db-${curdate}.hash"
  echo "[+] Uploading to S3..." >> $logfile
  result=$($s3cpath put ${prefix}-db-${curdate}.* s3://${s3path} 2>&1)
  # echo $result
  # s3cmd 1.5.0-beta1 does content-digest checksum validation by default
  if [ $(echo "$result" | grep -Pc "(MD5\ Sums\ don\'t\ match|Too\ many\ failures\.\ Giving\ up)") -eq 1 ] ; then
    failcode=$(($failcode + 200))
    postcode=400
    echo "$result" >> $logfile
  fi
  pattern="${prefix}-db-.*\.sql\.gz"
  deleteremotes
  deletelocals
else
  echo "[!] Won't back up database as no tickets have changed..." >> $logfile
fi
total_finish_time=$(date +%s)
total_execution_time=$(($total_finish_time - $total_start_time))
# If the script takes longer than a specified period, warn; if even longer, error out!
if [[ $total_execution_time -ge 60 && $total_execution_time -lt 300 ]] ; then
  failcode=50
  postcode=300
elif [ $total_execution_time -ge 300 ] ; then
  failcode=400
  postcode=400
fi
# Determine error messages to send
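# Messages are percent-encoded so they can be passed as-is in a URL to the
# monitoring hook mentioned at the bottom of the script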
case $failcode in
  0) postmsg="Success%20(${total_execution_time}s%20%3C%2060s)" ;;
  50) postmsg="Slow%20run%20(60s%20%3C%3D%20${total_execution_time}s%20%3C%20300s)" ;;
  100) postmsg='Error!%20Attachment%20upload%20failed' ;;
  200) postmsg='Error!%20DB%20upload%20failed' ;;
  300) postmsg='Error!%20Attachment%20and%20DB%20uploads%20failed' ;;
  400) postmsg="Very%20slow%20run%20(${total_execution_time}s%20%3E%3D%20300s)" ;;
esac
echo "[!] Done!" >> $logfile | |
echo "[+] $postmsg" >> $logfile | |
## Add your own post logic to a monitoring system to report the status and message of this script's run |
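## Example (illustrative only; the endpoint and its query parameters are
## placeholders, not a real service):
# curl -s "https://monitor.example.com/report?code=${postcode}&msg=${postmsg}" > /dev/null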