Innobackupex/XtraBackup Helper
#!/bin/bash
# ###################################
# Innodb Snapshot Helper
# ###################################
# Wrapper around innobackupex to either:
# - Take incremental snapshots that are immediately applied to the base snapshot
# - Apply a series of incremental snapshots to a base snapshot
# - Stream an incremental snapshot
#
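# Quick start (a sketch; /backup/snapshots is a placeholder path):
#   ./innodb-snapshot.sh base /backup/snapshots        # full snapshot into <dir>/base
#   ./innodb-snapshot.sh incr-apply /backup/snapshots  # incremental, merged into base
#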
set -e
##Usage: innodb-snapshot.sh <command> [<dir>] [innobackupex_options]
##
action=$1
shift || true
if [[ $1 =~ ^-- ]]; then
  dir=$(readlink -f "$PWD")
else
  dir=$(readlink -f "${1:-$PWD}")
  shift || true
fi
function check_base {
  [[ -d $1 ]] \
    || { echo "The base backup dir doesn't exist: $1"; exit 1; }
  [[ -f $1/xtrabackup_checkpoints ]] \
    || { echo "$1 does not contain a backup."; exit 1; }
  grep -qF 'backup_type = full-' "$1/xtrabackup_checkpoints" \
    || { echo "The base backup is not a full backup."; exit 1; }
}
##Commands:
##
case "$action" in
## install       Install Percona XtraBackup (Ubuntu 16.04 only)
install)
  VERSION=2.4.5
  [[ $(whoami) = "root" ]] || { echo "Must be root."; exit 1; }
  apt-get update
  apt-get install libdbd-mysql-perl rsync libaio1 libc6 libcurl3 libev4 libgcc1 libgcrypt20 libssl1.0.0 libstdc++6 zlib1g libdbi-perl libmysqlclient20 mysql-common
  wget -O /tmp/xtrabackup.deb https://www.percona.com/downloads/XtraBackup/Percona-XtraBackup-$VERSION/binary/debian/xenial/x86_64/percona-xtrabackup-24_$VERSION-1.xenial_amd64.deb
  dpkg -i /tmp/xtrabackup.deb
  wget -O /tmp/qpress.tar http://www.quicklz.com/qpress-11-linux-x64.tar
  tar -C /usr/local/bin -xf /tmp/qpress.tar qpress
  chmod +x /usr/local/bin/qpress
  rm /tmp/xtrabackup.deb /tmp/qpress.tar
  echo "XtraBackup $VERSION has been installed!"
  ;;
## base          Take a fresh full snapshot
base)
  [[ -d $dir/base ]] && { echo "$dir/base already exists."; exit 1; }
  innobackupex --no-timestamp $dir/base "$@"
  cp $dir/base/xtrabackup_info $dir/base/xtrabackup_info.orig
  ;;
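# Example (a sketch; path and credentials are placeholders -- any trailing
# options are passed straight through to innobackupex):
#   ./innodb-snapshot.sh base /backup/snapshots --user=root --password=secret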
## incr-apply    Take an incremental snapshot, apply it to the base and delete
##               the intermediate snapshot.
incr-apply)
  check_base $dir/base
  # A fresh base is 'full-backuped'; prepare it once with --redo-only so that
  # further incrementals can still be applied on top of it.
  if grep -qF 'backup_type = full-backuped' $dir/base/xtrabackup_checkpoints; then
    echo "Applying log to base snapshot..."
    innobackupex --apply-log --redo-only $dir/base
    rm $dir/base/xtrabackup_logfile
  fi
  TS=$(date +%Y-%m-%d_%H-%M-%S)
  innobackupex --incremental --no-timestamp $dir/$TS --incremental-basedir=$dir/base "$@"
  echo "Applying log to base from $TS..."
  innobackupex --apply-log --redo-only $dir/base --incremental-dir=$dir/$TS
  # Update the checkpoints file based on the merged incremental
  echo "backup_type = full-prepared" > $dir/checkpoints.tmp
  grep -F '_lsn' $dir/$TS/xtrabackup_checkpoints >> $dir/checkpoints.tmp
  echo "compact = 0" >> $dir/checkpoints.tmp
  mv $dir/base/xtrabackup_checkpoints $dir/base/xtrabackup_checkpoints.old
  mv $dir/checkpoints.tmp $dir/base/xtrabackup_checkpoints
  cp $dir/base/xtrabackup_info $dir/base/xtrabackup_info.old
  mv $dir/$TS/xtrabackup_info $dir/base
  mv $dir/$TS/xtrabackup_logfile $dir/base
  rm -rf $dir/$TS
  # Make the master binlog coordinates easy to find
  MLFILE=$(awk '/binlog_pos = /{print $4}' $dir/base/xtrabackup_info | tr -d ",'")
  MLPOS=$(awk '/binlog_pos = /{print $6}' $dir/base/xtrabackup_info)
  echo "MASTER_LOG_FILE='$MLFILE', MASTER_LOG_POS=$MLPOS"
  echo "CHANGE MASTER TO MASTER_LOG_FILE='$MLFILE', MASTER_LOG_POS=$MLPOS" > $dir/base/xtrabackup_master_info
  ;;
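# Example: merge an incremental into the base every night via cron (a sketch;
# the schedule and both paths are placeholders):
#   0 2 * * * /usr/local/bin/innodb-snapshot.sh incr-apply /backup/snapshots >> /var/log/innodb-snapshot.log 2>&1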
## incr-only     Take an incremental snapshot since the last one and do not
##               apply logs to it. Use this in conjunction with 'apply-all'.
incr-only)
  check_base $dir/base
  # Find the most recent timestamped incremental, falling back to the base
  last=$dir/base
  for incr in $dir/20*; do [ -d $incr ] && last=$incr; done
  # Without --no-timestamp, innobackupex creates a new timestamped dir under $dir
  innobackupex --incremental $dir/ --incremental-basedir=$last "$@"
  ;;
## apply-all     From a snapshot set with no logs applied, apply all logs.
##               The rollback phase will be applied, so no further incremental
##               backups can be taken from this point forward.
apply-all)
  check_base $dir/base
  TARGET=$dir/applied-$(date +%Y-%m-%d)
  [[ -d $TARGET ]] && { echo "$TARGET already exists."; exit 1; }
  last=$dir/base
  for incr in $dir/20*; do [ -d $incr ] && last=$incr; done
  if [ "$last" = "$dir/base" ]; then
    # No incrementals: prepare the base fully (redo and rollback phases)
    innobackupex --apply-log $dir/base "$@"
  else
    # Apply all but the last incremental with --redo-only; the final one is
    # applied without it so the rollback phase runs exactly once.
    innobackupex --apply-log --redo-only $dir/base "$@"
    for incr in $dir/20*; do
      if [ "$last" = "$incr" ]; then
        innobackupex --apply-log $dir/base --incremental-dir=$incr "$@"
        rm -f $dir/base/xtrabackup_info.latest
        rm -rf $incr
      else
        innobackupex --apply-log --redo-only $dir/base --incremental-dir=$incr "$@"
        cp $incr/xtrabackup_info $dir/base/xtrabackup_info.latest
        rm -rf $incr
      fi
    done
  fi
  mv $dir/base $TARGET
  # Make the master binlog coordinates easy to find
  MLFILE=$(awk '/binlog_pos = /{print $4}' $TARGET/xtrabackup_info | tr -d ",'")
  MLPOS=$(awk '/binlog_pos = /{print $6}' $TARGET/xtrabackup_info)
  echo "MASTER_LOG_FILE='$MLFILE', MASTER_LOG_POS=$MLPOS"
  echo "CHANGE MASTER TO MASTER_LOG_FILE='$MLFILE', MASTER_LOG_POS=$MLPOS" > $TARGET/xtrabackup_master_info
  ;;
## stream        Stream an incremental snapshot to stdout. Pipe it like so:
##               $ innodb-snapshot.sh stream > incr.xbstream; xbstream -x -C /backup-dir/ < incr.xbstream
##               $ innodb-snapshot.sh stream | ssh <host> xbstream -x -C /backup-dir/
stream)
  check_base $dir/base
  last=$dir/base
  for incr in $dir/20*; do [ -d $incr ] && last=$incr; done
  LSN=$(awk '/to_lsn/{print $3}' $last/xtrabackup_checkpoints)
  [ -z "$LSN" ] && { echo "Could not get LSN."; exit 1; }
  innobackupex --incremental --incremental-lsn=$LSN \
    --compress --compress-threads=4 --parallel=4 \
    --stream=xbstream $last "$@"
  ;;
## compress      Compress a directory with qpress
compress)
  [[ -f $dir/ibdata1 ]] || { echo "$dir does not contain a backup."; exit 1; }
  find $dir -maxdepth 1 -type f -name ib\* -exec qpress {} {}.qp \; -exec rm {} \;
  find $dir -maxdepth 2 -type f -name \*.ibd -exec qpress {} {}.qp \; -exec rm {} \;
  ;;
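# Example round trip (hypothetical path); the .qp files use the same qpress
# format that innobackupex --decompress handles, so the decompress command
# below should restore them:
#   ./innodb-snapshot.sh compress /backup/snapshots/applied-2017-02-15
#   ./innodb-snapshot.sh decompress /backup/snapshots/applied-2017-02-15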
## decompress    Decompress a directory compressed with innobackupex --compress
decompress)
  [[ -f $dir/ibdata1.qp ]] || { echo "$dir does not contain a compressed backup."; exit 1; }
  innobackupex --decompress $dir "$@"
  find $dir -name \*.qp -delete
  # Alternative without innobackupex:
  #find $dir -type f -name \*.qp -exec sh -c "qpress -d \$(echo {} | sed 's/\.qp$//')" \; -exec rm {} \;
  ;;
# All other commands print the usage/help text
--help|*)
  [[ $action = "--help" ]] || echo "Unrecognized command: $action"
  awk -F '##' '/^ *##/{print $2}' "${BASH_SOURCE[0]}"
  exit 1
  ;;
esac
##
##Useful innobackupex options:
##  --compress-threads=n  Use more than 1 thread during compression
##  --parallel=n          Use more than 1 thread during decompression
##  --rsync               Use rsync instead of cp for copying table files
##  --safe-slave-backup   Stop the slave SQL thread while performing the backup
##  --slave-info          Write the slave's master info to xtrabackup_slave_info
##
##-------------------------------------------------------------------
##There are many ways a backup can be done, but here is an example:
##
## 1. Take a base snapshot (base), then one or more incremental snapshots (incr-only) on top of it.
##
## $ ./innodb-snapshot.sh base
## $ ./innodb-snapshot.sh incr-only
##
## 2. Apply the logs to the base and all incremental snapshots to make the backup consistent (apply-all).
##
## $ ./innodb-snapshot.sh apply-all
##
## 3. (optional) Compress the backup.
##
## $ ./innodb-snapshot.sh compress snapshots/applied-$(date +%Y-%m-%d)
##
## 4. Rsync the backup to the new slave.
##
## $ rsync -avP -z --compress-level=1 -e "ssh -T -o Compression=no" \
## snapshots/applied-2017-02-15/ slave:/root/innodb-snapshot/
##
## 5. On the slave, decompress the backup and move its contents into the MySQL data directory.
##
## $ innobackupex --decompress /root/innodb-snapshot
## $ docker volume create --name mysql-data
## $ mv /root/innodb-snapshot/* /var/lib/docker/volumes/mysql-data/_data/
## $ chown -R 999:docker /var/lib/docker/volumes/mysql-data/_data/
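##
## 6. (optional) Start replication using the binlog coordinates that apply-all
##    saved in xtrabackup_master_info. A sketch: the master host, user and
##    password below are placeholders, since the file only records
##    MASTER_LOG_FILE and MASTER_LOG_POS.
##
## $ cat /var/lib/docker/volumes/mysql-data/_data/xtrabackup_master_info
## $ mysql -e "CHANGE MASTER TO MASTER_HOST='master.example.com', MASTER_USER='repl', MASTER_PASSWORD='...', MASTER_LOG_FILE='<from file>', MASTER_LOG_POS=<from file>; START SLAVE"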
##
##Another example: stream a new backup directly to another server:
##
## $ innobackupex --stream=xbstream --compress --compress-threads=3 --parallel=3 . \
## | ssh -T -o Compression=no root@host 'cat - > backup.xbstream'
##
## and then on the remote host:
##
## $ xbstream -C /var/lib/docker/volumes/mysql-data/_data -x < backup.xbstream
##
## or to transfer and extract in one command:
##
## $ innobackupex --stream=xbstream --compress --compress-threads=3 --parallel=3 . \
## | ssh -T -o Compression=no root@host 'xbstream -C /var/lib/docker/volumes/mysql-data/_data -x'
##
## Then decompress and apply logs:
##
## $ innobackupex --decompress --parallel=4 /var/lib/docker/volumes/mysql-data/_data
## $ innobackupex --apply-log /var/lib/docker/volumes/mysql-data/_data
## $ chown -R 999:docker /var/lib/docker/volumes/mysql-data/_data