Skip to content

Instantly share code, notes, and snippets.

@jfqd
Last active October 27, 2019 13:40
Show Gist options
  • Save jfqd/05727556f62f0769a030cbf6421516fa to your computer and use it in GitHub Desktop.
Save jfqd/05727556f62f0769a030cbf6421516fa to your computer and use it in GitHub Desktop.
zfs backup solution
#!/usr/bin/env ruby
# install ruby requirements with:
# gem install terrapin
require 'fileutils'
require 'logger'
require 'terrapin'
# Usage:
# DELETE_SNAPSHOTS=false DEBUG=true zfsbackup backup-storage.example.com local/pool remote/pool
# zfs extensions for local/pool:
# zfs allow -ld -u jerry hold,release,send,snapshot,destroy,mount local/pool
# zfs extensions for remote/pool:
# zfs allow -ld -u admin compression,mountpoint,create,destroy,hold,mount,receive,release,rename,userprop,aclmode,aclinherit,quota,refquota,sharenfs remote/pool
# enable compression on remote/pool:
# zfs set compression=on remote/pool
# restore settings on remote/pool:
# zfs unallow admin remote/pool
# 0,15,30,45 * * * * [ -x /opt/tools/bin/zfsbackup ] && /opt/tools/bin/zfsbackup backup-storage.example.com local/pool remote/pool >>/var/log/backup.log 2>&1
# Absolute paths to the external binaries invoked by this script.
ZFS = "/usr/sbin/zfs"
SSH = "/usr/bin/ssh"
GREP = "/usr/bin/grep"
PFEXEC = "/usr/bin/pfexec"
# Child dataset names (relative to the source base) to exclude from backup.
SKIP_REPOS = %w[]
# DEBUG=true in the environment enables verbose progress output.
# The comparison already yields a boolean; no ternary needed.
DEBUG = ENV['DEBUG'] == 'true'
LOGGER = Logger.new(STDOUT)
# Old snapshots are destroyed after a successful incremental send
# unless DELETE_SNAPSHOTS=false is given (defaults to true).
DELETE_SNAPSHOTS = ENV['DELETE_SNAPSHOTS'] != 'false'
# File persisting the name of the last successfully sent snapshot.
SNAPSHOT_CONFIG = "#{Dir.home}/.zfsbackup"
# Lock file guarding against concurrent runs (e.g. overlapping cron jobs).
LOCK_FILE = "#{Dir.home}/.zfsbackup.lock"
# Require exactly three positional arguments: backup host, source dataset,
# destination dataset. Anything else prints usage and aborts.
unless ARGV.size == 3
  puts "USAGE: zfsbackup host src_dataset dest_dataset"
  exit 1
end
host, src, dest = ARGV.shift(3)
# Run an external command through Terrapin, logging the invocation via LOGGER.
# Returns the command's stdout on success, or nil when the command exits with
# an unexpected status or cannot be found (the error is printed to STDERR).
# Exit codes 0 AND 1 both count as success — grep exits 1 on "no match".
def execute(cmd, options="", args={})
  runner = Terrapin::CommandLine.new(cmd, options,
                                     expected_outcodes: [0, 1],
                                     logger: LOGGER)
  runner.run(args)
rescue Terrapin::ExitStatusError, Terrapin::CommandNotFoundError => e
  STDERR.puts "Error: #{e.message}"
  nil
end
#
# List all child datasets beneath +dataset+ (recursively), with the base
# dataset itself and any SKIP_REPOS entries removed. When +host+ is given the
# listing runs remotely via ssh/pfexec; otherwise locally.
# Returns an Array of dataset name Strings (empty on command failure).
def datasets(dataset, host=nil)
  # Fixed: `host == nil?` parsed as `host == self.nil?` (i.e. `host == false`),
  # so the '-' placeholder was never shown. `host.nil?` is what was intended.
  puts "List dataset: #{host.nil? ? '-' : host}:#{dataset} ... " if DEBUG
  if host.nil?
    cmd = ZFS
    options = %[list -H -o name -r :dataset]
    args = {dataset: dataset}
  else
    cmd = SSH
    options = %[:host :pfexec :zfs list -H -o name -r :dataset]
    args = {dataset: dataset, host: host, pfexec: PFEXEC, zfs: ZFS}
  end
  result = execute(cmd, options, args)
  result = result.nil? ? [] : result.split("\n")
  # Remove any repositories in the skip list. Skips are resolved relative to
  # the listed base (the previous hard-coded "storage/shared/..." prefix only
  # worked for one specific pool).
  SKIP_REPOS.each { |d| result.delete("#{dataset}/#{d}") }
  # remove the base of the datasets from the array
  result.delete(dataset)
  puts "List of datasets: #{result.inspect}" if DEBUG
  result
end
# True when +dataset+ (local, or on +host+ when given) has at least one
# child dataset according to datasets().
def dataset_exists?(dataset, host=nil)
  !datasets(dataset, host).empty?
end
# Read the name of the last successfully sent snapshot from +path+,
# stripping all newlines. Returns nil when no snapshot has been recorded yet.
def get_latest_snapshot(path)
  # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
  File.exist?(path) ? File.read(path).gsub("\n", "") : nil
end
# Persist +snapshot+ as the latest successfully sent snapshot name at +path+.
def set_latest_snapshot(path, snapshot)
  File.open(path, "w") { |f| f.write(snapshot) }
end
# Create a recursive local snapshot; +dataset+ is the full "pool@name" spec.
# Returns true on success, false when the zfs command failed.
def create_snapshot(dataset)
  puts "Create snapshot: #{dataset} ... " if DEBUG
  # execute() returns nil on failure; implicit last-expression return.
  success = execute(ZFS, %[snapshot -r :dataset], {dataset: dataset})
  !success.nil?
end
# Create +dataset+ on the remote +host+ via ssh/pfexec.
# Returns true on success, false when the remote command failed.
def create_dataset(dataset, host)
  puts "Create dataset: #{dataset} ... " if DEBUG
  args = {dataset: dataset, host: host, pfexec: PFEXEC, zfs: ZFS}
  success = execute(SSH, %[:host :pfexec :zfs create :dataset], args)
  !success.nil?
end
# List every dataset/snapshot name under +dataset+ whose name matches
# +snapshot+ (filtered through grep; grep's exit code 1 on "no match" is
# tolerated by execute's expected_outcodes). Returns an Array of Strings.
def snapshots(dataset, snapshot)
  args = {dataset: dataset, snapshot: snapshot, grep: GREP}
  out = execute(ZFS, %[list -H -o name -t all -r :dataset | :grep :snapshot], args)
  out.nil? ? [] : out.split("\n")
end
# Send an initial full snapshot stream of +dataset+@+snapshot+ to +host+,
# receiving it (-F forces rollback) under the mapped destination dataset.
# Returns true on success.
def full_backup(host, dataset, src, dest, snapshot)
  puts "Create full backup" if DEBUG
  # Map the local dataset path onto the destination base by stripping only
  # the LEADING src prefix. The previous gsub(src, "") replaced every
  # occurrence of src anywhere in the path, corrupting nested names.
  _dest = dataset == src ? dest : "#{dest}#{dataset.sub(/\A#{Regexp.escape(src)}/, "")}"
  args = {
    local_snapshot: "#{dataset}@#{snapshot}",
    host: host,
    remote_dataset: _dest,
    ssh: SSH,
    pfexec: PFEXEC,
    zfs: ZFS
  }
  success = execute(ZFS, %[send :local_snapshot | :ssh :host :pfexec :zfs receive -F :remote_dataset], args)
  !success.nil?
end
# Send an incremental stream (from +latest_snapshot+ up to +snapshot+) of
# +dataset+ to +host+, receiving it with -F -d under the mapped destination.
# Returns true on success.
def incremental_backup(host, dataset, src, dest, latest_snapshot, snapshot)
  puts "Create incremental backup" if DEBUG
  # Strip only the LEADING src prefix when mapping onto dest — gsub(src, "")
  # would replace every occurrence of src anywhere in the path.
  _dest = dataset == src ? dest : "#{dest}#{dataset.sub(/\A#{Regexp.escape(src)}/, "")}"
  args = {
    latest_snapshot: latest_snapshot,
    current_snapshot: "#{dataset}@#{snapshot}",
    host: host,
    remote_dataset: _dest,
    ssh: SSH,
    pfexec: PFEXEC,
    zfs: ZFS
  }
  success = execute(ZFS, %[send -i :latest_snapshot :current_snapshot | :ssh :host :pfexec :zfs receive -F -d :remote_dataset], args)
  STDERR.puts "incremental_backup: #{success.inspect}" if DEBUG
  !success.nil?
end
# Destroy +snapshot+ locally, or on +host+ via ssh/pfexec when given.
# Returns execute()'s result (stdout, or nil on failure — which execute
# already reports to STDERR). The unused `result =` local was dropped.
def destroy_snapshot(snapshot, host=nil)
  # Intentionally logged unconditionally (the original DEBUG guard was
  # commented out) so destroyed snapshots always appear in the cron log.
  puts "destroy snapshot: #{snapshot}"
  if host.nil?
    execute(ZFS, %[destroy :snapshot], {snapshot: snapshot})
  else
    execute(SSH, %[:host :pfexec :zfs destroy :snapshot],
            {snapshot: snapshot, host: host, pfexec: PFEXEC, zfs: ZFS})
  end
end
# Create the lock file that marks a backup run as in progress.
# Uses FileUtils.touch instead of shelling out to /usr/bin/touch.
def create_lock
  FileUtils.touch(LOCK_FILE)
end
# True when a previous run's lock file still exists.
def lock_present?
  # File.exist? — File.exists? was deprecated and removed in Ruby 3.2.
  File.exist?(LOCK_FILE)
end
# Remove the lock file. rm_f is quiet when the file is already gone,
# matching the original shell-out to rm (whose stderr was discarded).
def delete_lock
  FileUtils.rm_f(LOCK_FILE)
end
#####################
## main
##
# Refuse to start while another backup run holds the lock; otherwise
# take the lock ourselves.
if lock_present?
  STDERR.puts "Error: Process already running!"
  exit 1
end
create_lock
# validate src dataset
datasets = datasets(src)
if datasets.empty?
  STDERR.puts "Error: local dataset #{src} does not exist"
  # Release the lock before bailing out — otherwise a stale lock file
  # blocks every subsequent cron run.
  delete_lock
  exit 1
end
# validate dest dataset; create it remotely if missing
if datasets(dest, host).empty?
  create_dataset(dest, host)
  puts "create missing remote dataset #{host}:#{dest}" if DEBUG
  if datasets(dest, host).empty?
    STDERR.puts "ERROR: remote dataset #{host}:#{dest} could not be created"
    delete_lock # release the lock before bailing out
    exit 1
  end
end
# Name the new snapshot after the current timestamp.
# (The unused `yesterday` local was removed.)
t = Time.now
date = t.strftime "%Y%m%d-%H%M%S"
old_snapshot = get_latest_snapshot(SNAPSHOT_CONFIG)
new_snapshot = "zfsbackup-#{date}"
puts "latest_snapshot : #{old_snapshot}" if DEBUG
puts "current_snapshot: #{new_snapshot}" if DEBUG
# TODO: test if disk-space is available (local and remote)
# create new snapshot
unless create_snapshot("#{src}@#{new_snapshot}")
  STDERR.puts "Failed to create snapshot: #{src}@#{new_snapshot}"
  # Release the lock before bailing out so the next cron run can proceed.
  delete_lock
  exit 1
end
# Send each dataset: a full stream on the first ever run (no recorded
# snapshot), an incremental stream afterwards.
datasets.each do |dataset|
  if old_snapshot.nil?
    # A failed full send was previously ignored silently — report it.
    unless full_backup(host, dataset, src, dest, new_snapshot)
      STDERR.puts "Failed to send full snapshot: #{dataset}@#{new_snapshot}"
    end
  else
    if incremental_backup(host, dataset, src, dest, old_snapshot, new_snapshot)
      # all fine, destroy the old snapshot on both sides
      local_snapshot = "#{dataset}@#{old_snapshot}"
      # Strip only the leading src prefix (gsub would replace every
      # occurrence of src in the path).
      remote_snapshot = (dataset == src ? "#{dest}@#{old_snapshot}" : "#{dest}#{dataset.sub(/\A#{Regexp.escape(src)}/, "")}@#{old_snapshot}")
      if DELETE_SNAPSHOTS
        destroy_snapshot(local_snapshot)
        destroy_snapshot(remote_snapshot, host)
      end
    else
      # Report the dataset that actually failed (was misleadingly src).
      STDERR.puts "Failed to send snapshot: #{dataset}@#{new_snapshot}"
    end
  end
end
# Remember the snapshot we just sent so the next run goes incremental.
set_latest_snapshot(SNAPSHOT_CONFIG, new_snapshot)
# Release the process lock and finish.
delete_lock
exit 0
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment