VMware ESXi Backup Configuration
=============================== BACKUP SERVER ===============================
timedatectl set-timezone America/New_York
apt update
apt full-upgrade
apt install nfs-kernel-server samba git-all python
vim /etc/ssh/sshd_config
Port 22 (uncomment this line)
Port 12345 (add this line, using the real port number instead of 12345)
ufw allow 12345/tcp
reboot
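A quick sanity check before logging out (a sketch; sshd -t validates the config and ss confirms the listening ports, both standard on Ubuntu):
sshd -t                   # exits non-zero if sshd_config has a syntax error
systemctl restart ssh     # not strictly needed if rebooting, as above
ss -tlnp | grep sshd      # confirm sshd is listening on both ports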
root@backup2018:~# gdisk /dev/sdc
GPT fdisk (gdisk) version 1.0.3
Partition table scan:
MBR: protective
BSD: not present
APM: not present
GPT: present
Found valid GPT with protective MBR; using GPT.
Command (? for help): o
This option deletes all partitions and creates a new protective MBR.
Proceed? (Y/N): Y
Command (? for help): n
Partition number (1-128, default 1):
First sector (34-7814037134, default = 2048) or {+-}size{KMGTP}:
Last sector (2048-7814037134, default = 7814037134) or {+-}size{KMGTP}:
Current type is 'Linux filesystem'
Hex code or GUID (L to show codes, Enter = 8300):
Changed type of partition to 'Linux filesystem'
Command (? for help): w
Final checks complete. About to write GPT data. THIS WILL OVERWRITE EXISTING
PARTITIONS!!
Do you want to proceed? (Y/N): Y
OK; writing new GUID partition table (GPT) to /dev/sdc.
The operation has completed successfully.
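If scripting this later, a non-interactive equivalent of the gdisk session above (a sketch using sgdisk, which ships in the same gdisk package):
sgdisk --zap-all /dev/sdc            # wipe the existing partition tables
sgdisk -n 1:0:0 -t 1:8300 /dev/sdc   # one Linux-filesystem partition spanning the disk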
root@backup2018:~# mkfs.ext4 /dev/sdc1
mke2fs 1.44.1 (24-Mar-2018)
Creating filesystem with 976754385 4k blocks and 244195328 inodes
Filesystem UUID: fc3d91c2-616b-4a88-9fd9-0e0383e84648
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000, 7962624, 11239424, 20480000, 23887872, 71663616, 78675968,
102400000, 214990848, 512000000, 550731776, 644972544
Allocating group tables: done
Writing inode tables: done
Creating journal (262144 blocks): done
Writing superblocks and filesystem accounting information: done
root@backup2018:~# tune2fs -L sqlbackup /dev/sdc1
tune2fs 1.44.1 (24-Mar-2018)
root@backup2018:~# e2label /dev/sdc1
sqlbackup
root@backup2018:~# cat /etc/fstab
UUID=6d0e3242-5350-4791-8b7a-f076fb339a1e / ext4 defaults 0 0
/swap.img none swap sw 0 0
LABEL=backup /backup/data ext4 defaults 1 2
LABEL=sqlbackup /sync/sqlretain ext4 defaults 1 2
Always mount by label or UUID, never by device name; device names like /dev/sdc can change between boots.
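blkid (part of util-linux) lists the labels and UUIDs to use in the fstab entries:
blkid /dev/sdc1
# /dev/sdc1: LABEL="sqlbackup" UUID="fc3d91c2-616b-4a88-9fd9-0e0383e84648" TYPE="ext4"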
root@backup2018:~# mkdir -p /sync/sqlsync/Backups
root@backup2018:~# mkdir /sync/sqlretain
root@backup2018:~# mount LABEL=sqlbackup
root@backup2018:~# mkdir /sync/sqlretain/Backups
root@backup2018:~# chmod -R go-w /sync
root@backup2018:~# cd /sync
root@backup2018:/sync# chmod -R a+rwX sqlsync/Backups
root@backup2018:/sync# chmod -R a+rwX sqlretain/Backups
root@backup2018:/sync# mkdir -p /backup/data
root@backup2018:/sync# mount LABEL=backup
root@backup2018:/sync# cd /backup/data
root@backup2018:/backup/data# mkdir ServerName1
root@backup2018:/backup/data# mkdir ServerName2
root@backup2018:/backup/data# chmod -R a+rwX ServerName*
root@backup2018:/backup# cat /etc/exports
/backup/bin 10.77.1.20(rw,async,no_subtree_check,root_squash)
/backup/data 10.77.1.20(rw,async,no_subtree_check,root_squash)
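After editing /etc/exports, the shares have to be re-exported; the nfs-kernel-server restart further down covers this, but exportfs can apply it immediately:
exportfs -ra    # re-read /etc/exports and apply it
exportfs -v     # verify what is exported and with which options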
root@backup2018:/backup# cd /etc/samba
root@backup2018:/etc/samba# cat smb.conf
[global]
workgroup = WORKGROUP
server role = standalone server
server string = Backup Server
netbios name = backup2018
hosts allow = 10.77.1.22
security = user
map to guest = bad user
dns proxy = no
guest account = backupaccount
create mask = 666
directory mask = 777
force user = backupaccount
force group = backupaccount
[sqlsync]
comment = Fileshare for MSSQL Exports
path = /sync/sqlsync
available = yes
browseable = yes
guest ok = yes
writable = yes
read only = no
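The config forces everything to a local backupaccount user, which the gist never shows being created; a hedged sketch of creating it and reloading Samba (the useradd options are assumptions, testparm and the smbd/nmbd units are standard on Ubuntu):
useradd -M -s /usr/sbin/nologin backupaccount   # guest account with no home dir or shell
testparm                                        # validate smb.conf
systemctl restart smbd nmbd                     # pick up the new configuration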
(replace the port, server, and user with your own values)
scp -P 12345 user@osiris:downloads/hobocopy.tar.gz /backup/
cd /backup/
tar xvzf hobocopy.tar.gz
chmod -R a+rwX /backup/bin/log
root@backup2018:/backup/bin# vim daily_list
Vserver02
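daily_list is the file handed to backup.sh with -f in the ESXi crontab below, apparently one VM name per line; assuming the ServerName1/ServerName2 directories created earlier correspond to VM names, a fuller list would simply be:
Vserver02
ServerName1
ServerName2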
root@backup2018:/etc/cron.d# ls -al
total 20
drwxr-xr-x. 2 root root 35 Apr 29 20:41 .
drwxr-xr-x. 78 root root 8192 Apr 29 20:59 ..
-rw-r--r--. 1 root root 128 Jul 27 2015 0hourly
-rw-r--r--. 1 root root 133 Apr 29 20:41 1SQL_BAK
root@backup2018:/etc/cron.d# cat 1SQL_BAK
0 10 * * * root rsync -aXs /sync/sqlsync/. /sync/sqlretain/.
0 15 * * * root find /sync/sqlretain/Backups/* -mtime +18 -exec rm {} \;
Keeps 18 days of backups.
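The find line above will also try to rm any directories under Backups and error on them; a slightly safer sketch that prunes only regular files:
0 15 * * * root find /sync/sqlretain/Backups -type f -mtime +18 -delete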
systemctl restart nfs-kernel-server
================================ ESXi SERVER ================================
[root@backup backup]# ssh 10.77.1.20
The ESXi server keeps UTC time, so convert the desired Eastern time to UTC.
9:00 PM EST = 02:00 UTC (the next day), written in cron as "0 2" (minute hour).
Note that ESXi cron entries do not follow daylight saving, so the local run time shifts by an hour during EDT.
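To double-check the host's idea of the current UTC time before trusting the conversion:
esxcli system time get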
[root@SightlineESXi:/etc/rc.local.d] cat local.sh
#!/bin/sh
/bin/kill $(cat /var/run/crond.pid)
/bin/echo "0 2 * * * /vmfs/volumes/backup/backup.sh -g /vmfs/volumes/backup/daily.conf -f /vmfs/volumes/backup/daily_list > /vmfs/volumes/backup/log/backup-daily-\$(date +\\%Y-\\%m-\\%d).log" >> /var/spool/cron/crontabs/root
/bin/echo "33 8 * * 0 /vmfs/volumes/backup/backup.sh -g /vmfs/volumes/backup/weekly.conf -f /vmfs/volumes/backup/weekly_list > /vmfs/volumes/backup/log/backup-weekly-\$(date +\\%Y-\\%m-\\%d).log" >> /var/spool/cron/crontabs/root
/bin/crond
exit 0
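ESXi rebuilds root's crontab at every boot, which is why local.sh re-appends the entries and restarts crond. To persist the edited local.sh right away instead of waiting for the hourly job visible in the crontab below, run ESXi's stock config-backup script:
/sbin/auto-backup.sh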
[root@SightlineESXi:/var/spool/cron/crontabs] cat root
#min hour day mon dow command
1 1 * * * /sbin/tmpwatch.py
1 * * * * /sbin/auto-backup.sh
0 * * * * /usr/lib/vmware/vmksummary/log-heartbeat.py
*/5 * * * * /bin/hostd-probe.sh ++group=host/vim/vmvisor/hostd-probe/stats/sh
00 1 * * * localcli storage core device purge
0 2 * * * /vmfs/volumes/backup/backup.sh -g /vmfs/volumes/backup/daily.conf -f /vmfs/volumes/backup/daily_list > /vmfs/volumes/backup/log/backup-daily-$(date +\%Y-\%m-\%d).log
33 8 * * 0 /vmfs/volumes/backup/backup.sh -g /vmfs/volumes/backup/weekly.conf -f /vmfs/volumes/backup/weekly_list > /vmfs/volumes/backup/log/backup-weekly-$(date +\%Y-\%m-\%d).log
[root@SightlineESXi:~] esxcfg-nas -a -o 10.77.1.5 -s /backup/bin backup
[root@SightlineESXi:~] esxcfg-nas -a -o 10.77.1.5 -s /backup/data backupdata
[root@SightlineESXi:~] esxcfg-nas -l
backupdata is /backup/data from 10.77.1.5 mounted available
backup is /backup/bin from 10.77.1.5 mounted available
If there are any problems, you can delete the mount with this command:
[root@SightlineESXi:~] esxcfg-nas -d backup
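The same NFS mounts can also be managed with esxcli on current ESXi builds (equivalent to the esxcfg-nas commands above; shown as a sketch):
esxcli storage nfs add --host=10.77.1.5 --share=/backup/bin --volume-name=backup
esxcli storage nfs list
esxcli storage nfs remove --volume-name=backup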
================================ SQL SERVER =================================
Execute each of these batches separately on the MS SQL instance
EXEC sp_configure 'show advanced options', 1;
GO
RECONFIGURE;
GO
---
EXEC sp_configure 'xp_cmdshell',1
GO
RECONFIGURE
GO
--- Map the backup share as drive Z: (this will report an error)
EXEC XP_CMDSHELL 'net use Z: \\10.77.1.5\sqlsync'
--- Confirm the volume is actually there
EXEC XP_CMDSHELL 'Dir Z:'
Now set up a maintenance plan to back up to Z:
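Before building the maintenance plan, a one-off backup from the command line is a quick way to confirm SQL Server can write to the share (a sketch; the instance, database, and file names are placeholders):
sqlcmd -S localhost -E -Q "BACKUP DATABASE [ExampleDB] TO DISK = 'Z:\Backups\ExampleDB.bak' WITH INIT"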
Addendum (Sep 10, 2020): local.sh variant for when there are two backup drives with alternating backup nights

#!/bin/sh
/bin/kill $(cat /var/run/crond.pid)
/bin/echo "22 2 * * 2,4,6 /vmfs/volumes/backup/backup.sh -g /vmfs/volumes/backup/daily.conf -f /vmfs/volumes/backup/daily_list > /vmfs/volumes/backup/log/backup-daily-\$(date +\\%Y-\\%m-\\%d).log" >> /var/spool/cron/crontabs/root
/bin/echo "22 2 * * 0,3,5 /vmfs/volumes/backup/backup2.sh -g /vmfs/volumes/backup/daily.conf -f /vmfs/volumes/backup/daily_list > /vmfs/volumes/backup/log/backup-daily-\$(date +\\%Y-\\%m-\\%d).log" >> /var/spool/cron/crontabs/root
/bin/crond
exit 0
