@gdha
Created December 17, 2020 12:49
=~=~=~=~=~=~=~=~=~=~=~= PuTTY log 2020.12.17 08:21:20 =~=~=~=~=~=~=~=~=~=~=~=
cat /var/lib/rear/layout/disklayout.conf
# Disk /dev/sda
# Format: disk <devname> <size(bytes)> <partition label type>
disk /dev/sda 48318382080 msdos
# Partitions on /dev/sda
# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>
part /dev/sda 536870912 1048576 primary boot /dev/sda1
part /dev/sda 47780462592 537919488 primary lvm /dev/sda2
# Disk /dev/sdb
# Format: disk <devname> <size(bytes)> <partition label type>
disk /dev/sdb 268435456000 unknown
# Partitions on /dev/sdb
# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>
# Disk /dev/sdc
# Format: disk <devname> <size(bytes)> <partition label type>
disk /dev/sdc 107374182400 unknown
# Partitions on /dev/sdc
# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>
# Disk /dev/sdd
# Format: disk <devname> <size(bytes)> <partition label type>
disk /dev/sdd 429496729600 loop
# Partitions on /dev/sdd
# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>
# Disk /dev/sde
# Format: disk <devname> <size(bytes)> <partition label type>
disk /dev/sde 10737418240 unknown
# Partitions on /dev/sde
# Format: part <device> <partition size(bytes)> <partition start(bytes)> <partition type|name> <flags> /dev/<partition>
# Format for LVM PVs
# lvmdev <volume_group> <device> [<uuid>] [<size(bytes)>]
lvmdev /dev/vg02 /dev/sdc UHT8T8-DKQi-UcDg-TTIJ-9IuI-DamD-OkHz3J 209715200
lvmdev /dev/vg00 /dev/sda2 DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN 93321216
lvmdev /dev/vg00 /dev/sde MTm8Ad-YHtu-emgm-nqi8-Z6FW-8t2Y-RMlwrz 20971520
lvmdev /dev/vg01 /dev/sdb ph05Xa-NkTS-8sEa-K5At-pA8E-Q9hH-GUI63l 524288000
# Format for LVM VGs
# lvmgrp <volume_group> <extentsize> [<size(extents)>] [<size(bytes)>]
lvmgrp /dev/vg02 4096 25599 104853504
lvmgrp /dev/vg00 4096 13950 57139200
lvmgrp /dev/vg01 4096 63999 262139904
# Format for LVM LVs
# lvmvol <volume_group> <name> <size(bytes)> <layout> [key:value ...]
lvmvol /dev/vg00 lv_audit 4294967296b linear
lvmvol /dev/vg00 lv_home 4294967296b linear
lvmvol /dev/vg00 lv_log 4294967296b linear
lvmvol /dev/vg00 lv_openv 5586812928b linear
lvmvol /dev/vg00 lv_opt 5368709120b linear
lvmvol /dev/vg00 lv_root 8589934592b linear
lvmvol /dev/vg00 lv_tanium 3221225472b linear
lvmvol /dev/vg00 lv_tmp 2147483648b linear
# WARNING: Volume vg00/lv_var has multiple segments. Restoring it in Migration Mode using 'lvcreate' won't preserve segments and properties of the other segments as well!
lvmvol /dev/vg00 lv_var 8589934592b linear
# extra parameters for the line above not taken into account when restoring using 'lvcreate': segmentsize:4294967296b
#lvmvol /dev/vg00 lv_var 8589934592b linear
# extra parameters for the line above not taken into account when restoring using 'lvcreate': segmentsize:4294967296b
lvmvol /dev/vg00 swap 4294967296b linear
lvmvol /dev/vg01 lv00 268431261696b linear
lvmvol /dev/vg02 lv00 107369988096b linear
# Filesystems (only ext2,ext3,ext4,vfat,xfs,reiserfs,btrfs are supported).
# Format: fs <device> <mountpoint> <fstype> [uuid=<uuid>] [label=<label>] [<attributes>]
fs /dev/mapper/vg00-lv_audit /var/log/audit ext3 uuid=8e92b10f-1065-46ff-ae41-eb9dcf594a25 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg00-lv_home /home ext3 uuid=05c6e67f-b1c6-43f5-95fb-07dd66ddbc79 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,nodev,relatime,data=ordered
fs /dev/mapper/vg00-lv_log /var/log ext3 uuid=27c80cff-f6b3-4079-8d5f-b29b315be900 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg00-lv_openv /usr/openv ext3 uuid=7a7217a0-ee6d-4606-a594-33225cd18e20 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16365 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg00-lv_opt /opt ext3 uuid=eca735a8-927d-4c24-ac7b-72ae0846aa81 label= blocksize=4096 reserved_blocks=5% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,nosuid,nodev,relatime,data=ordered
fs /dev/mapper/vg00-lv_root / ext3 uuid=570493f8-d3b9-4110-9fda-f39c13e5d33d label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg00-lv_tanium /opt/Tanium ext3 uuid=dc1ed788-208b-4007-be9c-d97b8f30812c label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg00-lv_tmp /tmp ext3 uuid=d74c63ed-f72e-400a-954f-164acd8b5f25 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,nosuid,nodev,relatime,data=ordered
fs /dev/mapper/vg00-lv_var /var ext3 uuid=a166eb11-535c-409f-9bfe-d7d2ba025d1c label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg01-lv00 /app/util ext3 uuid=fa4b1411-af38-4fe6-866e-9a9e746ff4fa label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16383 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/mapper/vg02-lv00 /app/gtsc ext3 uuid=a344c150-c2d8-485e-97d7-8243a609d89e label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16383 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/sda1 /boot ext3 uuid=3b9c84f9-1cf9-480c-8d46-26f8dd7b7e70 label= blocksize=4096 reserved_blocks=4% max_mounts=-1 check_interval=0d bytes_per_inode=16384 default_mount_options=user_xattr,acl options=rw,relatime,data=ordered
fs /dev/sdd /app/gtsc/docker xfs uuid=e3fbb87a-2b3d-4f5f-8245-af9320935d7f label= options=rw,relatime,attr2,inode64,noquota
# Swap partitions or swap files
# Format: swap <filename> uuid=<uuid> label=<label>
swap /dev/mapper/vg00-swap uuid=109e1047-d5bb-4bd1-ad17-f013910321a7 label=
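A minimal sketch (not part of the captured session) of how the lvmdev entries listed above could be cross-checked against the PVs the rescue system actually sees before starting 'rear recover'; it only assumes the disklayout.conf field order shown above (lvmdev <vg> <device> <uuid> <size>):

# Compare each recorded PV UUID with what 'lvm pvs' reports for that device;
# a mismatch or '<none>' means the saved layout no longer matches the hardware.
while read -r _ vg dev uuid _; do
    seen=$(lvm pvs --noheadings -o pv_uuid "$dev" 2>/dev/null | tr -d ' ')
    printf '%-10s %-12s expected=%s found=%s\n' "$vg" "$dev" "$uuid" "${seen:-<none>}"
done < <(grep '^lvmdev ' /var/lib/rear/layout/disklayout.conf)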
RESCUE ITSGBHHLSP01629:~ # date
Thu Dec 17 08:21:27 CET 2020
RESCUE ITSGBHHLSP01629:~ # rear -v recover
Relax-and-Recover 2.4 / Git
Using log file: /var/log/rear/rear-ITSGBHHLSP01629.log
Running workflow recover within the ReaR rescue/recovery system
Starting required daemons for NFS: RPC portmapper (portmap or rpcbind) and rpc.statd if available.
Started RPC portmapper 'rpcbind'.
RPC portmapper 'rpcbind' available.
Started rpc.statd.
RPC status rpc.statd available.
Starting rpc.idmapd failed.
Using backup archive '/tmp/rear.z4FzZYUX19es1s0/outputfs/image/backup.tar.gz'
Calculating backup archive size
Backup archive size is 4.5G /tmp/rear.z4FzZYUX19es1s0/outputfs/image/backup.tar.gz (compressed)
Comparing disks
Ambiguous possible target disks need manual configuration (more than one with same size found)
Switching to manual disk layout configuration
Using /dev/sda (same name and same size) for recreating /dev/sda
Using /dev/sdb (same name and same size) for recreating /dev/sdb
Using /dev/sdc (same name and same size) for recreating /dev/sdc
Using /dev/sdd (same name and same size) for recreating /dev/sdd
Using /dev/sde (same name and same size) for recreating /dev/sde
Current disk mapping table (source -> target):
/dev/sda /dev/sda
/dev/sdb /dev/sdb
/dev/sdc /dev/sdc
/dev/sdd /dev/sdd
/dev/sde /dev/sde
Confirm or edit the disk mapping
1) Confirm disk mapping and continue 'rear recover'
2) Edit disk mapping (/var/lib/rear/layout/disk_mappings)
3) Use Relax-and-Recover shell and return back to here
4) Abort 'rear recover'
(default '1' timeout 300 seconds)
1
User confirmed disk mapping
Confirm or edit the disk layout file
1) Confirm disk layout and continue 'rear recover'
2) Edit disk layout (/var/lib/rear/layout/disklayout.conf)
3) View disk layout (/var/lib/rear/layout/disklayout.conf)
4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
5) Use Relax-and-Recover shell and return back to here
6) Abort 'rear recover'
(default '1' timeout 300 seconds)
1
User confirmed disk layout file
Confirm or edit the disk recreation script
1) Confirm disk recreation script and continue 'rear recover'
2) Edit disk recreation script (/var/lib/rear/layout/diskrestore.sh)
3) View disk recreation script (/var/lib/rear/layout/diskrestore.sh)
4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
5) Use Relax-and-Recover shell and return back to here
6) Abort 'rear recover'
(default '1' timeout 300 seconds)
1
User confirmed disk recreation script
Disks to be overwritten: /dev/sda
1) Confirm disks to be completely overwritten and continue 'rear recover'
2) Use Relax-and-Recover shell and return back to here
3) Abort 'rear recover'
(default '1' timeout 300 seconds)
1
User confirmed disks to be overwritten
Wiping child devices of /dev/sda in reverse ordering: /dev/sda2 /dev/sda1 /dev/sda
Start system layout restoration.
Disk '/dev/sda': creating 'msdos' partition table
Disk '/dev/sda': creating partition number 1 with name 'primary'
Disk '/dev/sda': creating partition number 2 with name 'primary'
Creating LVM PV /dev/sdc
Creating LVM PV /dev/sda2
Creating LVM PV /dev/sde
Creating LVM PV /dev/sdb
Creating LVM VG 'vg02'; Warning: some properties may not be preserved...
Creating LVM VG 'vg00'; Warning: some properties may not be preserved...
The disk layout recreation script failed
1) Rerun disk recreation script (/var/lib/rear/layout/diskrestore.sh)
2) View 'rear recover' log file (/var/log/rear/rear-ITSGBHHLSP01629.log)
3) Edit disk recreation script (/var/lib/rear/layout/diskrestore.sh)
4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
5) Use Relax-and-Recover shell and return back to here
6) Abort 'rear recover'
(default '1' timeout 300 seconds)
6
ERROR: User chose to abort 'rear recover' in /usr/share/rear/layout/recreate/default/200_run_layout_code.sh
Aborting due to an error, check /var/log/rear/rear-ITSGBHHLSP01629.log for details
Exiting rear recover (PID 747) and its descendant processes
Running exit tasks
Terminated
RESCUE ITSGBHHLSP01629:~ # vgs
VG #PV #LV #SN Attr VSize VFree
vg00 2 10 0 wz--n- 54.49g <7.29g
vg02 1 0 0 wz--n- <100.00g <100.00g
RESCUE ITSGBHHLSP01629:~ # pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg00 lvm2 a-- <44.50g 300.00m
/dev/sdb lvm2 --- 250.00g 250.00g
/dev/sdc vg02 lvm2 a-- <100.00g <100.00g
/dev/sde vg00 lvm2 a-- <10.00g <7.00g
RESCUE ITSGBHHLSP01629:~ # cat /var/log/rear/rear-ITSGBHHLSP01629.log
2020-12-17 08:21:39.321656742 Relax-and-Recover 2.4 / Git
2020-12-17 08:21:39.322867182 Command line options: /bin/rear -v recover
2020-12-17 08:21:39.323997135 Using log file: /var/log/rear/rear-ITSGBHHLSP01629.log
2020-12-17 08:21:39.325405060 Including /etc/rear/os.conf
2020-12-17 08:21:39.328604981 Including conf/Linux-i386.conf
2020-12-17 08:21:39.329987069 Including conf/GNU/Linux.conf
2020-12-17 08:21:39.334658092 Including /etc/rear/site.conf
2020-12-17 08:21:39.336097971 Including /etc/rear/local.conf
2020-12-17 08:21:39.337715976 Including /etc/rear/rescue.conf
2020-12-17 08:21:39.340365877 ======================
2020-12-17 08:21:39.341500363 Running 'init' stage
2020-12-17 08:21:39.342627193 ======================
2020-12-17 08:21:39.349120770 Including init/default/005_verify_os_conf.sh
2020-12-17 08:21:39.352692303 Including init/default/010_set_drlm_env.sh
2020-12-17 08:21:39.356480491 Including init/default/030_update_recovery_system.sh
2020-12-17 08:21:39.360114103 Including init/default/050_check_rear_recover_mode.sh
2020-12-17 08:21:39.361441870 Running workflow recover within the ReaR rescue/recovery system
2020-12-17 08:21:39.362851664 Finished running 'init' stage in 0 seconds
2020-12-17 08:21:39.370907184 Using build area '/tmp/rear.z4FzZYUX19es1s0'
mkdir: created directory '/tmp/rear.z4FzZYUX19es1s0/rootfs'
mkdir: created directory '/tmp/rear.z4FzZYUX19es1s0/tmp'
2020-12-17 08:21:39.374343846 Running recover workflow
2020-12-17 08:21:39.375559597 ======================
2020-12-17 08:21:39.376647499 Running 'setup' stage
2020-12-17 08:21:39.377741093 ======================
2020-12-17 08:21:39.384102258 Including setup/default/005_ssh_agent_start.sh
2020-12-17 08:21:39.387305129 Starting up ssh-agent
Agent pid 916
2020-12-17 08:21:39.395945137 Including setup/default/010_pre_recovery_script.sh
2020-12-17 08:21:39.397180220 Finished running 'setup' stage in 0 seconds
2020-12-17 08:21:39.398351701 ======================
2020-12-17 08:21:39.399468928 Running 'verify' stage
2020-12-17 08:21:39.400594857 ======================
2020-12-17 08:21:39.407266941 Including verify/default/020_cciss_scsi_engage.sh
2020-12-17 08:21:39.412220936 Including verify/default/020_translate_url.sh
2020-12-17 08:21:39.416088779 Including verify/default/030_translate_tape.sh
2020-12-17 08:21:39.421848884 Including verify/default/040_validate_variables.sh
2020-12-17 08:21:39.425594175 Including verify/NETFS/default/050_check_NETFS_requirements.sh
2020-12-17 08:21:39.430536393 Skipping 'ping' test for host 'itsbebevnobkup2.jnj.com' in BACKUP_URL 'nfs://itsbebevnobkup2.jnj.com/vol/itsbebevnobkup2_linux/linux_images_1/itsgbhhlsp01629.jnj.com'
2020-12-17 08:21:39.443542777 Including verify/default/050_create_mappings_dir.sh
2020-12-17 08:21:39.447308247 Including verify/GNU/Linux/050_sane_recovery_check.sh
2020-12-17 08:21:39.451005448 Including verify/NETFS/default/050_start_required_nfs_daemons.sh
2020-12-17 08:21:39.454472278 Starting required daemons for NFS: RPC portmapper (portmap or rpcbind) and rpc.statd if available.
/usr/share/rear/lib/_input-output-functions.sh: line 331: type: portmap: not found
2020-12-17 08:21:39.461097706 Started RPC portmapper 'rpcbind'.
2020-12-17 08:21:39.479459335 RPC portmapper 'rpcbind' available.
2020-12-17 08:21:39.496290718 Started rpc.statd.
2020-12-17 08:21:39.500212600 RPC status rpc.statd available.
rpc.idmapd: conf_set: duplicate tag [General]:Domain, ignoring...
2020-12-17 08:21:39.504145149 Starting rpc.idmapd failed.
mount: unknown filesystem type 'nfsd'
2020-12-17 08:21:39.599204548 Including verify/NETFS/default/060_mount_NETFS_path.sh
mkdir: created directory '/tmp/rear.z4FzZYUX19es1s0/outputfs'
2020-12-17 08:21:39.607458607 Mounting with 'mount -v -t nfs -o nfsvers=3,nolock itsbebevnobkup2.jnj.com:/vol/itsbebevnobkup2_linux/linux_images_1/itsgbhhlsp01629.jnj.com /tmp/rear.z4FzZYUX19es1s0/outputfs'
mount.nfs: trying 10.166.30.34 prog 100003 vers 3 prot TCP port 2049
mount.nfs: trying 10.166.30.34 prog 100005 vers 3 prot UDP port 4046
mount.nfs: timeout set for Thu Dec 17 08:23:39 2020
mount.nfs: trying text-based options 'nfsvers=3,nolock,addr=10.166.30.34'
mount.nfs: prog 100003, trying vers=3, prot=6
mount.nfs: prog 100005, trying vers=3, prot=17
2020-12-17 08:21:39.774368013 Including verify/NETFS/default/070_set_backup_archive.sh
2020-12-17 08:21:39.779024062 Using backup archive '/tmp/rear.z4FzZYUX19es1s0/outputfs/image/backup.tar.gz'
2020-12-17 08:21:39.836480699 Including verify/NETFS/default/090_set_readonly_options.sh
2020-12-17 08:21:39.840368470 Including verify/GNU/Linux/230_storage_and_network_modules.sh
2020-12-17 08:21:39.841822319 Including storage drivers
2020-12-17 08:21:39.847176279 Including network drivers
2020-12-17 08:21:39.853702044 Including crypto drivers
2020-12-17 08:21:39.858082638 Including virtualization drivers
2020-12-17 08:21:39.861932676 Including additional drivers
2020-12-17 08:21:39.868116495 Including verify/GNU/Linux/260_recovery_storage_drivers.sh
2020-12-17 08:21:39.950402197 No driver migration: '/tmp/rear.z4FzZYUX19es1s0/tmp/storage_drivers' and '/var/lib/rear/recovery/storage_drivers' are the same
2020-12-17 08:21:39.954324569 Including verify/NETFS/default/550_check_backup_archive.sh
2020-12-17 08:21:39.974333176 Calculating backup archive size
2020-12-17 08:21:39.977562171 Backup archive size is 4.5G /tmp/rear.z4FzZYUX19es1s0/outputfs/image/backup.tar.gz (compressed)
2020-12-17 08:21:39.981895633 Including verify/NETFS/default/600_check_encryption_key.sh
2020-12-17 08:21:39.985676982 Including verify/NETFS/default/980_umount_NETFS_dir.sh
2020-12-17 08:21:39.989169311 Unmounting '/tmp/rear.z4FzZYUX19es1s0/outputfs'
/tmp/rear.z4FzZYUX19es1s0/outputfs: nfs mount point detected
/tmp/rear.z4FzZYUX19es1s0/outputfs: umounted
rmdir: removing directory, '/tmp/rear.z4FzZYUX19es1s0/outputfs'
2020-12-17 08:21:40.031963143 Finished running 'verify' stage in 1 seconds
2020-12-17 08:21:40.033239123 ======================
2020-12-17 08:21:40.034399454 Running 'layout/prepare' stage
2020-12-17 08:21:40.035571620 ======================
2020-12-17 08:21:40.042842060 Including layout/prepare/default/010_prepare_files.sh
2020-12-17 08:21:40.049899457 Including layout/prepare/GNU/Linux/100_include_partition_code.sh
2020-12-17 08:21:40.060797778 Including layout/prepare/GNU/Linux/110_include_lvm_code.sh
2020-12-17 08:21:40.071189791 Including layout/prepare/GNU/Linux/120_include_raid_code.sh
/usr/share/rear/lib/_input-output-functions.sh: line 331: type: mdadm: not found
2020-12-17 08:21:40.075598544 Including layout/prepare/GNU/Linux/130_include_filesystem_code.sh
2020-12-17 08:21:40.080594211 Including layout/prepare/GNU/Linux/130_include_mount_filesystem_code.sh
2020-12-17 08:21:40.084882025 Including layout/prepare/GNU/Linux/130_include_mount_subvolumes_code.sh
2020-12-17 08:21:40.090741029 Including layout/prepare/GNU/Linux/140_include_swap_code.sh
2020-12-17 08:21:40.098873107 Including layout/prepare/GNU/Linux/150_include_drbd_code.sh
2020-12-17 08:21:40.102836433 Including layout/prepare/GNU/Linux/160_include_luks_code.sh
2020-12-17 08:21:40.107103602 Including layout/prepare/GNU/Linux/170_include_hpraid_code.sh
/usr/share/rear/lib/_input-output-functions.sh: line 331: type: hpacucli: not found
/usr/share/rear/lib/_input-output-functions.sh: line 331: type: hpssacli: not found
/usr/share/rear/lib/_input-output-functions.sh: line 331: type: ssacli: not found
2020-12-17 08:21:40.111339646 Including layout/prepare/GNU/Linux/180_include_opaldisk_code.sh
2020-12-17 08:21:40.115488337 Including layout/prepare/default/200_recreate_hpraid.sh
2020-12-17 08:21:40.120715471 Including layout/prepare/GNU/Linux/210_load_multipath.sh
2020-12-17 08:21:40.126257253 Including layout/prepare/default/250_compare_disks.sh
2020-12-17 08:21:40.127814253 Comparing disks
2020-12-17 08:21:40.143896121 Ambiguous possible target disks need manual configuration (more than one with same size found)
2020-12-17 08:21:40.145684987 Switching to manual disk layout configuration
2020-12-17 08:21:40.149924072 Including layout/prepare/default/270_overrule_migration_mode.sh
2020-12-17 08:21:40.155550205 Including layout/prepare/default/300_map_disks.sh
2020-12-17 08:21:40.385064858 Using /dev/sda (same name and same size) for recreating /dev/sda
2020-12-17 08:21:40.394363122 Using /dev/sdb (same name and same size) for recreating /dev/sdb
2020-12-17 08:21:40.403665805 Using /dev/sdc (same name and same size) for recreating /dev/sdc
2020-12-17 08:21:40.412899478 Using /dev/sdd (same name and same size) for recreating /dev/sdd
2020-12-17 08:21:40.422078179 Using /dev/sde (same name and same size) for recreating /dev/sde
2020-12-17 08:21:40.431826540 Current disk mapping table (source -> target):
2020-12-17 08:21:40.435144899 /dev/sda /dev/sda
/dev/sdb /dev/sdb
/dev/sdc /dev/sdc
/dev/sdd /dev/sdd
/dev/sde /dev/sde
2020-12-17 08:21:40.438229666 UserInput: called in /usr/share/rear/layout/prepare/default/300_map_disks.sh line 211
2020-12-17 08:21:40.441754039 UserInput: Default input in choices - using choice number 1 as default input
2020-12-17 08:21:40.443376083 Confirm or edit the disk mapping
2020-12-17 08:21:40.444927515 1) Confirm disk mapping and continue 'rear recover'
2020-12-17 08:21:40.446464837 2) Edit disk mapping (/var/lib/rear/layout/disk_mappings)
2020-12-17 08:21:40.447968893 3) Use Relax-and-Recover shell and return back to here
2020-12-17 08:21:40.449457194 4) Abort 'rear recover'
2020-12-17 08:21:40.450987379 (default '1' timeout 300 seconds)
2020-12-17 08:21:44.721535190 UserInput: 'read' got as user input '1'
2020-12-17 08:21:44.725452450 User confirmed disk mapping
2020-12-17 08:21:44.737031503 Including layout/prepare/default/310_remove_exclusions.sh
2020-12-17 08:21:44.744286841 Including layout/prepare/default/320_apply_mappings.sh
2020-12-17 08:21:44.747284506 Completely identical layout mapping in /var/lib/rear/layout/disk_mappings
2020-12-17 08:21:44.751486480 Including layout/prepare/default/420_autoresize_last_partitions.sh
48318382080
268435456000
107374182400
429496729600
10737418240
2020-12-17 08:21:44.776857012 Including layout/prepare/default/430_autoresize_all_partitions.sh
2020-12-17 08:21:44.781071333 Including layout/prepare/default/500_confirm_layout_file.sh
2020-12-17 08:21:44.785351038 UserInput: called in /usr/share/rear/layout/prepare/default/500_confirm_layout_file.sh line 26
2020-12-17 08:21:44.788851894 UserInput: Default input in choices - using choice number 1 as default input
2020-12-17 08:21:44.790589797 Confirm or edit the disk layout file
2020-12-17 08:21:44.792227701 1) Confirm disk layout and continue 'rear recover'
2020-12-17 08:21:44.793742562 2) Edit disk layout (/var/lib/rear/layout/disklayout.conf)
2020-12-17 08:21:44.795241435 3) View disk layout (/var/lib/rear/layout/disklayout.conf)
2020-12-17 08:21:44.796721499 4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
2020-12-17 08:21:44.798289670 5) Use Relax-and-Recover shell and return back to here
2020-12-17 08:21:44.799849282 6) Abort 'rear recover'
2020-12-17 08:21:44.801412521 (default '1' timeout 300 seconds)
2020-12-17 08:21:51.559907219 UserInput: 'read' got as user input '1'
2020-12-17 08:21:51.563973064 User confirmed disk layout file
2020-12-17 08:21:51.568401397 Including layout/prepare/default/510_list_dependencies.sh
2020-12-17 08:21:51.796981469 Including layout/prepare/default/520_exclude_components.sh
2020-12-17 08:21:51.802615995 Including layout/prepare/default/540_generate_device_code.sh
2020-12-17 08:21:51.900000539 No partitions on device /dev/sdb.
2020-12-17 08:21:51.921108796 No partitions on device /dev/sdc.
2020-12-17 08:21:51.942539351 No partitions on device /dev/sdd.
2020-12-17 08:21:51.963764685 No partitions on device /dev/sde.
2020-12-17 08:21:52.357370068 Begin create_fs( fs:/ )
2020-12-17 08:21:52.362440576 Begin mount_fs( fs:/ )
2020-12-17 08:21:52.366349532 End mount_fs( fs:/ )
2020-12-17 08:21:52.367805692 End create_fs( fs:/ )
2020-12-17 08:21:52.389347900 Begin create_fs( fs:/home )
2020-12-17 08:21:52.394558140 Begin mount_fs( fs:/home )
2020-12-17 08:21:52.398663663 End mount_fs( fs:/home )
2020-12-17 08:21:52.400431710 End create_fs( fs:/home )
2020-12-17 08:21:52.426902851 Begin create_fs( fs:/usr/openv )
2020-12-17 08:21:52.432208862 Begin mount_fs( fs:/usr/openv )
2020-12-17 08:21:52.436765408 End mount_fs( fs:/usr/openv )
2020-12-17 08:21:52.438306448 End create_fs( fs:/usr/openv )
2020-12-17 08:21:52.465135140 Begin create_fs( fs:/opt )
2020-12-17 08:21:52.470477017 Begin mount_fs( fs:/opt )
2020-12-17 08:21:52.474805246 End mount_fs( fs:/opt )
2020-12-17 08:21:52.476384653 End create_fs( fs:/opt )
2020-12-17 08:21:52.504229818 Begin create_fs( fs:/opt/Tanium )
2020-12-17 08:21:52.509570685 Begin mount_fs( fs:/opt/Tanium )
2020-12-17 08:21:52.513652349 End mount_fs( fs:/opt/Tanium )
2020-12-17 08:21:52.515196345 End create_fs( fs:/opt/Tanium )
2020-12-17 08:21:52.542322117 Begin create_fs( fs:/tmp )
2020-12-17 08:21:52.547695085 Begin mount_fs( fs:/tmp )
2020-12-17 08:21:52.551871666 End mount_fs( fs:/tmp )
2020-12-17 08:21:52.553428640 End create_fs( fs:/tmp )
2020-12-17 08:21:52.580580627 Begin create_fs( fs:/var )
2020-12-17 08:21:52.585817961 Begin mount_fs( fs:/var )
2020-12-17 08:21:52.589950286 End mount_fs( fs:/var )
2020-12-17 08:21:52.591473921 End create_fs( fs:/var )
2020-12-17 08:21:52.614066425 Begin create_fs( fs:/var/log )
2020-12-17 08:21:52.619518479 Begin mount_fs( fs:/var/log )
2020-12-17 08:21:52.623655838 End mount_fs( fs:/var/log )
2020-12-17 08:21:52.625200328 End create_fs( fs:/var/log )
2020-12-17 08:21:52.642483097 Begin create_fs( fs:/var/log/audit )
2020-12-17 08:21:52.648001256 Begin mount_fs( fs:/var/log/audit )
2020-12-17 08:21:52.652293704 End mount_fs( fs:/var/log/audit )
2020-12-17 08:21:52.653849162 End create_fs( fs:/var/log/audit )
2020-12-17 08:21:52.669271590 Begin create_fs( fs:/app/util )
2020-12-17 08:21:52.675165786 Begin mount_fs( fs:/app/util )
2020-12-17 08:21:52.679395825 End mount_fs( fs:/app/util )
2020-12-17 08:21:52.680941534 End create_fs( fs:/app/util )
2020-12-17 08:21:52.696267981 Begin create_fs( fs:/app/gtsc )
2020-12-17 08:21:52.701759365 Begin mount_fs( fs:/app/gtsc )
2020-12-17 08:21:52.705968376 End mount_fs( fs:/app/gtsc )
2020-12-17 08:21:52.707543716 End create_fs( fs:/app/gtsc )
2020-12-17 08:21:52.723060880 Begin create_fs( fs:/boot )
2020-12-17 08:21:52.728753786 Begin mount_fs( fs:/boot )
2020-12-17 08:21:52.733468754 End mount_fs( fs:/boot )
2020-12-17 08:21:52.735142891 End create_fs( fs:/boot )
2020-12-17 08:21:52.751587928 Begin create_fs( fs:/app/gtsc/docker )
2020-12-17 08:21:52.913780960 Begin mount_fs( fs:/app/gtsc/docker )
2020-12-17 08:21:52.917816636 End mount_fs( fs:/app/gtsc/docker )
2020-12-17 08:21:52.919347317 End create_fs( fs:/app/gtsc/docker )
2020-12-17 08:21:52.946825105 Including layout/prepare/default/550_finalize_script.sh
2020-12-17 08:21:52.952268378 Including layout/prepare/default/600_show_unprocessed.sh
2020-12-17 08:21:52.959054785 Including layout/prepare/default/610_exclude_from_restore.sh
2020-12-17 08:21:52.960526291 Finished running 'layout/prepare' stage in 12 seconds
2020-12-17 08:21:52.961830533 ======================
2020-12-17 08:21:52.963075146 Running 'layout/recreate' stage
2020-12-17 08:21:52.964288975 ======================
2020-12-17 08:21:52.971623663 Including layout/recreate/default/100_confirm_layout_code.sh
2020-12-17 08:21:52.975836358 UserInput: called in /usr/share/rear/layout/recreate/default/100_confirm_layout_code.sh line 26
2020-12-17 08:21:52.979421402 UserInput: Default input in choices - using choice number 1 as default input
2020-12-17 08:21:52.981275992 Confirm or edit the disk recreation script
2020-12-17 08:21:52.982861186 1) Confirm disk recreation script and continue 'rear recover'
2020-12-17 08:21:52.984388811 2) Edit disk recreation script (/var/lib/rear/layout/diskrestore.sh)
2020-12-17 08:21:52.985894965 3) View disk recreation script (/var/lib/rear/layout/diskrestore.sh)
2020-12-17 08:21:52.987427669 4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
2020-12-17 08:21:52.988943362 5) Use Relax-and-Recover shell and return back to here
2020-12-17 08:21:52.990471435 6) Abort 'rear recover'
2020-12-17 08:21:52.992065915 (default '1' timeout 300 seconds)
2020-12-17 08:22:02.688666136 UserInput: 'read' got as user input '1'
2020-12-17 08:22:02.693092916 User confirmed disk recreation script
2020-12-17 08:22:02.698702572 Including layout/recreate/default/120_confirm_wipedisk_disks.sh
2020-12-17 08:22:02.704639311 UserInput: called in /usr/share/rear/layout/recreate/default/120_confirm_wipedisk_disks.sh line 33
2020-12-17 08:22:02.708251140 UserInput: Default input in choices - using choice number 1 as default input
2020-12-17 08:22:02.709870875 Disks to be overwritten: /dev/sda
2020-12-17 08:22:02.711528752 1) Confirm disks to be completely overwritten and continue 'rear recover'
2020-12-17 08:22:02.713086744 2) Use Relax-and-Recover shell and return back to here
2020-12-17 08:22:02.714652166 3) Abort 'rear recover'
2020-12-17 08:22:02.716182034 (default '1' timeout 300 seconds)
2020-12-17 08:22:10.295201171 UserInput: 'read' got as user input '1'
2020-12-17 08:22:10.299597719 User confirmed disks to be overwritten
2020-12-17 08:22:10.304515583 Including layout/recreate/default/150_wipe_disks.sh
2020-12-17 08:22:10.306181582 Block devices structure on the unchanged replacement hardware before the disks /dev/sda will be wiped (lsblk):
2020-12-17 08:22:10.313481917 NAME KNAME PKNAME TRAN TYPE FSTYPE SIZE MOUNTPOINT
/dev/sda /dev/sda disk 45G
|-/dev/sda1 /dev/sda1 /dev/sda part ext3 512M
`-/dev/sda2 /dev/sda2 /dev/sda part LVM2_member 44.5G
/dev/sdb /dev/sdb disk LVM2_member 250G
/dev/sdc /dev/sdc disk LVM2_member 100G
/dev/sdd /dev/sdd disk xfs 400G
/dev/sde /dev/sde disk LVM2_member 10G
/dev/sr0 /dev/sr0 sata rom iso9660 262.9M
WARNING: PV /dev/sda2 is used by VG vg00.
WARNING: Wiping physical volume label from /dev/sda2 of volume group "vg00".
Labels on physical volume "/dev/sda2" successfully wiped.
2020-12-17 08:22:10.438025729 Wiping child devices of /dev/sda in reverse ordering: /dev/sda2 /dev/sda1 /dev/sda
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0175839 s, 954 MB/s
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0271414 s, 618 MB/s
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0170919 s, 982 MB/s
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0518887 s, 323 MB/s
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0148915 s, 1.1 GB/s
16+0 records in
16+0 records out
16777216 bytes (17 MB) copied, 0.0154752 s, 1.1 GB/s
2020-12-17 08:22:10.600773676 Remaining block devices structure after the disks /dev/sda were wiped (lsblk):
2020-12-17 08:22:10.607778944 NAME KNAME PKNAME TRAN TYPE FSTYPE SIZE MOUNTPOINT
/dev/sda /dev/sda disk 45G
/dev/sdb /dev/sdb disk LVM2_member 250G
/dev/sdc /dev/sdc disk LVM2_member 100G
/dev/sdd /dev/sdd disk xfs 400G
/dev/sde /dev/sde disk LVM2_member 10G
/dev/sr0 /dev/sr0 sata rom iso9660 262.9M
2020-12-17 08:22:10.612287396 Including layout/recreate/default/200_run_layout_code.sh
2020-12-17 08:22:10.616077205 Start system layout restoration.
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
+++ create_component /dev/sda disk
+++ local device=/dev/sda
+++ local type=disk
+++ local touchfile=disk--dev-sda
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sda ']'
+++ return 0
+++ Log 'Stop mdadm'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:10.638170428 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:10.638170428 Stop mdadm'
2020-12-17 08:22:10.638170428 Stop mdadm
+++ grep -q md /proc/mdstat
+++ Log 'Erasing MBR of disk /dev/sda'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:10.640868864 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:10.640868864 Erasing MBR of disk /dev/sda'
2020-12-17 08:22:10.640868864 Erasing MBR of disk /dev/sda
+++ dd if=/dev/zero of=/dev/sda bs=512 count=1
1+0 records in
1+0 records out
512 bytes (512 B) copied, 0.000502975 s, 1.0 MB/s
+++ sync
+++ create_disk_label /dev/sda msdos
+++ local disk=/dev/sda label=msdos
+++ [[ -n '' ]]
+++ current_disk=/dev/sda
+++ [[ -n '' ]]
+++ disk_label=msdos
+++ LogPrint 'Disk '\''/dev/sda'\'': creating '\''msdos'\'' partition table'
+++ Log 'Disk '\''/dev/sda'\'': creating '\''msdos'\'' partition table'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:10.644740799 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:10.644740799 Disk '\''/dev/sda'\'': creating '\''msdos'\'' partition table'
2020-12-17 08:22:10.644740799 Disk '/dev/sda': creating 'msdos' partition table
+++ Print 'Disk '\''/dev/sda'\'': creating '\''msdos'\'' partition table'
+++ test 1
+++ echo -e 'Disk '\''/dev/sda'\'': creating '\''msdos'\'' partition table'
+++ parted -s /dev/sda mklabel msdos
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ create_disk_partition /dev/sda primary 1 1048576 537919487
+++ local disk=/dev/sda name=primary number=1 startB=1048576 endB=537919487
+++ [[ -n /dev/sda ]]
+++ [[ /dev/sda != \/\d\e\v\/\s\d\a ]]
+++ current_disk=/dev/sda
+++ [[ ! -n msdos ]]
+++ '[' msdos == msdos ']'
+++ [[ 1 -le last_partition_number ]]
+++ [[ 0 -eq 0 ]]
+++ LogPrint 'Disk '\''/dev/sda'\'': creating partition number 1 with name '\''primary'\'''
+++ Log 'Disk '\''/dev/sda'\'': creating partition number 1 with name '\''primary'\'''
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:10.655590461 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:10.655590461 Disk '\''/dev/sda'\'': creating partition number 1 with name '\''primary'\'''
2020-12-17 08:22:10.655590461 Disk '/dev/sda': creating partition number 1 with name 'primary'
+++ Print 'Disk '\''/dev/sda'\'': creating partition number 1 with name '\''primary'\'''
+++ test 1
+++ echo -e 'Disk '\''/dev/sda'\'': creating partition number 1 with name '\''primary'\'''
+++ [[ ! -n 537919487 ]]
+++ parted -s /dev/sda mkpart primary 1048576B 537919487B
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ last_partition_number=1
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ parted -s /dev/sda set 1 boot on
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ create_disk_partition /dev/sda primary 2 537919488 48318382079
+++ local disk=/dev/sda name=primary number=2 startB=537919488 endB=48318382079
+++ [[ -n /dev/sda ]]
+++ [[ /dev/sda != \/\d\e\v\/\s\d\a ]]
+++ current_disk=/dev/sda
+++ [[ ! -n msdos ]]
+++ '[' msdos == msdos ']'
+++ [[ 2 -le last_partition_number ]]
+++ [[ 0 -eq 0 ]]
+++ LogPrint 'Disk '\''/dev/sda'\'': creating partition number 2 with name '\''primary'\'''
+++ Log 'Disk '\''/dev/sda'\'': creating partition number 2 with name '\''primary'\'''
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:10.707607196 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:10.707607196 Disk '\''/dev/sda'\'': creating partition number 2 with name '\''primary'\'''
2020-12-17 08:22:10.707607196 Disk '/dev/sda': creating partition number 2 with name 'primary'
+++ Print 'Disk '\''/dev/sda'\'': creating partition number 2 with name '\''primary'\'''
+++ test 1
+++ echo -e 'Disk '\''/dev/sda'\'': creating partition number 2 with name '\''primary'\'''
+++ [[ ! -n 48318382079 ]]
+++ parted -s /dev/sda mkpart primary 537919488B 48318382079B
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ last_partition_number=2
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ parted -s /dev/sda set 2 lvm on
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ sleep 1
+++ partprobe -s /dev/sda
/dev/sda: msdos partitions 1 2
+++ my_udevtrigger
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm trigger
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ delete_dummy_partitions_and_resize_real_ones
+++ [[ 0 -eq 0 ]]
+++ partitions_to_resize=()
+++ current_disk=
+++ disk_label=
+++ last_partition_number=0
+++ return 0
+++ component_created /dev/sda disk
+++ local device=/dev/sda
+++ local type=disk
+++ local touchfile=disk--dev-sda
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sda
+++ create_component /dev/sda1 part
+++ local device=/dev/sda1
+++ local type=part
+++ local touchfile=part--dev-sda1
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/part--dev-sda1 ']'
+++ return 0
+++ component_created /dev/sda1 part
+++ local device=/dev/sda1
+++ local type=part
+++ local touchfile=part--dev-sda1
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/part--dev-sda1
+++ create_component /dev/sda2 part
+++ local device=/dev/sda2
+++ local type=part
+++ local touchfile=part--dev-sda2
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/part--dev-sda2 ']'
+++ return 0
+++ component_created /dev/sda2 part
+++ local device=/dev/sda2
+++ local type=part
+++ local touchfile=part--dev-sda2
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/part--dev-sda2
+++ create_component /dev/sdb disk
+++ local device=/dev/sdb
+++ local type=disk
+++ local touchfile=disk--dev-sdb
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdb ']'
+++ return 0
+++ Log 'Stop mdadm'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:14.877264485 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:14.877264485 Stop mdadm'
2020-12-17 08:22:14.877264485 Stop mdadm
+++ grep -q md /proc/mdstat
+++ Log 'Erasing MBR of disk /dev/sdb'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:14.880266119 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:14.880266119 Erasing MBR of disk /dev/sdb'
2020-12-17 08:22:14.880266119 Erasing MBR of disk /dev/sdb
+++ dd if=/dev/zero of=/dev/sdb bs=512 count=1
1+0 records in
1+0 records out
512 bytes (512 B) copied, 0.0190267 s, 26.9 kB/s
+++ sync
+++ my_udevtrigger
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm trigger
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ delete_dummy_partitions_and_resize_real_ones
+++ [[ 0 -eq 0 ]]
+++ partitions_to_resize=()
+++ current_disk=
+++ disk_label=
+++ last_partition_number=0
+++ return 0
+++ component_created /dev/sdb disk
+++ local device=/dev/sdb
+++ local type=disk
+++ local touchfile=disk--dev-sdb
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdb
+++ create_component /dev/sdc disk
+++ local device=/dev/sdc
+++ local type=disk
+++ local touchfile=disk--dev-sdc
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdc ']'
+++ return 0
+++ Log 'Stop mdadm'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:17.974419975 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:17.974419975 Stop mdadm'
2020-12-17 08:22:17.974419975 Stop mdadm
+++ grep -q md /proc/mdstat
+++ Log 'Erasing MBR of disk /dev/sdc'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:17.978097052 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:17.978097052 Erasing MBR of disk /dev/sdc'
2020-12-17 08:22:17.978097052 Erasing MBR of disk /dev/sdc
+++ dd if=/dev/zero of=/dev/sdc bs=512 count=1
1+0 records in
1+0 records out
512 bytes (512 B) copied, 0.0140059 s, 36.6 kB/s
+++ sync
+++ my_udevtrigger
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm trigger
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ delete_dummy_partitions_and_resize_real_ones
+++ [[ 0 -eq 0 ]]
+++ partitions_to_resize=()
+++ current_disk=
+++ disk_label=
+++ last_partition_number=0
+++ return 0
+++ component_created /dev/sdc disk
+++ local device=/dev/sdc
+++ local type=disk
+++ local touchfile=disk--dev-sdc
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdc
+++ create_component /dev/sdd disk
+++ local device=/dev/sdd
+++ local type=disk
+++ local touchfile=disk--dev-sdd
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdd ']'
+++ return 0
+++ Log 'Stop mdadm'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:21.067721931 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:21.067721931 Stop mdadm'
2020-12-17 08:22:21.067721931 Stop mdadm
+++ grep -q md /proc/mdstat
+++ Log 'Erasing MBR of disk /dev/sdd'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:21.070644640 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:21.070644640 Erasing MBR of disk /dev/sdd'
2020-12-17 08:22:21.070644640 Erasing MBR of disk /dev/sdd
+++ dd if=/dev/zero of=/dev/sdd bs=512 count=1
1+0 records in
1+0 records out
512 bytes (512 B) copied, 0.0164703 s, 31.1 kB/s
+++ sync
+++ my_udevtrigger
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm trigger
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ delete_dummy_partitions_and_resize_real_ones
+++ [[ 0 -eq 0 ]]
+++ partitions_to_resize=()
+++ current_disk=
+++ disk_label=
+++ last_partition_number=0
+++ return 0
+++ component_created /dev/sdd disk
+++ local device=/dev/sdd
+++ local type=disk
+++ local touchfile=disk--dev-sdd
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sdd
+++ create_component /dev/sde disk
+++ local device=/dev/sde
+++ local type=disk
+++ local touchfile=disk--dev-sde
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sde ']'
+++ return 0
+++ Log 'Stop mdadm'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:24.165958149 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:24.165958149 Stop mdadm'
2020-12-17 08:22:24.165958149 Stop mdadm
+++ grep -q md /proc/mdstat
+++ Log 'Erasing MBR of disk /dev/sde'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:24.168994780 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:24.168994780 Erasing MBR of disk /dev/sde'
2020-12-17 08:22:24.168994780 Erasing MBR of disk /dev/sde
+++ dd if=/dev/zero of=/dev/sde bs=512 count=1
1+0 records in
1+0 records out
512 bytes (512 B) copied, 0.0182613 s, 28.0 kB/s
+++ sync
+++ my_udevtrigger
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm trigger
+++ return 0
+++ my_udevsettle
+++ has_binary udevadm
+++ for bin in '$@'
+++ type udevadm
+++ return 0
+++ udevadm settle
+++ return 0
+++ delete_dummy_partitions_and_resize_real_ones
+++ [[ 0 -eq 0 ]]
+++ partitions_to_resize=()
+++ current_disk=
+++ disk_label=
+++ last_partition_number=0
+++ return 0
+++ component_created /dev/sde disk
+++ local device=/dev/sde
+++ local type=disk
+++ local touchfile=disk--dev-sde
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/disk--dev-sde
+++ create_component pv:/dev/sdc lvmdev
+++ local device=pv:/dev/sdc
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sdc
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sdc ']'
+++ return 0
+++ LogPrint 'Creating LVM PV /dev/sdc'
+++ Log 'Creating LVM PV /dev/sdc'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.268278348 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.268278348 Creating LVM PV /dev/sdc'
2020-12-17 08:22:27.268278348 Creating LVM PV /dev/sdc
+++ Print 'Creating LVM PV /dev/sdc'
+++ test 1
+++ echo -e 'Creating LVM PV /dev/sdc'
+++ lvm vgchange -a n vg02
0 logical volume(s) in volume group "vg02" now active
+++ lvm pvcreate -ff --yes -v --uuid UHT8T8-DKQi-UcDg-TTIJ-9IuI-DamD-OkHz3J --norestorefile /dev/sdc
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
There are 1 physical volumes missing.
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
There are 1 physical volumes missing.
Wiping signatures on new PV /dev/sdc.
Set up physical volume for "/dev/sdc" with 209715200 available sectors.
Zeroing start of device /dev/sdc.
Writing physical volume data to disk "/dev/sdc".
Physical volume "/dev/sdc" successfully created.
+++ component_created pv:/dev/sdc lvmdev
+++ local device=pv:/dev/sdc
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sdc
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sdc
+++ create_component pv:/dev/sda2 lvmdev
+++ local device=pv:/dev/sda2
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sda2
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sda2 ']'
+++ return 0
+++ LogPrint 'Creating LVM PV /dev/sda2'
+++ Log 'Creating LVM PV /dev/sda2'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.317088762 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.317088762 Creating LVM PV /dev/sda2'
2020-12-17 08:22:27.317088762 Creating LVM PV /dev/sda2
+++ Print 'Creating LVM PV /dev/sda2'
+++ test 1
+++ echo -e 'Creating LVM PV /dev/sda2'
+++ lvm vgchange -a n vg00
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
0 logical volume(s) in volume group "vg00" now active
+++ lvm pvcreate -ff --yes -v --uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN --norestorefile /dev/sda2
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
There are 1 physical volumes missing.
Couldn't find device with uuid DKN7Dd-D1H2-Kqed-AyaS-z84k-NFPo-NnzUkN.
There are 1 physical volumes missing.
Wiping signatures on new PV /dev/sda2.
Set up physical volume for "/dev/sda2" with 93321216 available sectors.
Zeroing start of device /dev/sda2.
Writing physical volume data to disk "/dev/sda2".
Physical volume "/dev/sda2" successfully created.
+++ component_created pv:/dev/sda2 lvmdev
+++ local device=pv:/dev/sda2
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sda2
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sda2
+++ create_component pv:/dev/sde lvmdev
+++ local device=pv:/dev/sde
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sde
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sde ']'
+++ return 0
+++ LogPrint 'Creating LVM PV /dev/sde'
+++ Log 'Creating LVM PV /dev/sde'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.360035926 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.360035926 Creating LVM PV /dev/sde'
2020-12-17 08:22:27.360035926 Creating LVM PV /dev/sde
+++ Print 'Creating LVM PV /dev/sde'
+++ test 1
+++ echo -e 'Creating LVM PV /dev/sde'
+++ lvm vgchange -a n vg00
WARNING: Inconsistent metadata found for VG vg00 - updating to use version 14
WARNING: Repairing Physical Volume /dev/sda2 that is in Volume Group vg00 but not marked as used.
WARNING: Inconsistent metadata found for VG vg00 - updating to use version 15
0 logical volume(s) in volume group "vg00" now active
+++ lvm pvcreate -ff --yes -v --uuid MTm8Ad-YHtu-emgm-nqi8-Z6FW-8t2Y-RMlwrz --norestorefile /dev/sde
Wiping signatures on new PV /dev/sde.
Set up physical volume for "/dev/sde" with 20971520 available sectors.
Zeroing start of device /dev/sde.
Writing physical volume data to disk "/dev/sde".
Physical volume "/dev/sde" successfully created.
+++ component_created pv:/dev/sde lvmdev
+++ local device=pv:/dev/sde
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sde
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sde
+++ create_component pv:/dev/sdb lvmdev
+++ local device=pv:/dev/sdb
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sdb
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sdb ']'
+++ return 0
+++ LogPrint 'Creating LVM PV /dev/sdb'
+++ Log 'Creating LVM PV /dev/sdb'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.411942651 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.411942651 Creating LVM PV /dev/sdb'
2020-12-17 08:22:27.411942651 Creating LVM PV /dev/sdb
+++ Print 'Creating LVM PV /dev/sdb'
+++ test 1
+++ echo -e 'Creating LVM PV /dev/sdb'
+++ lvm vgchange -a n vg01
0 logical volume(s) in volume group "vg01" now active
+++ lvm pvcreate -ff --yes -v --uuid ph05Xa-NkTS-8sEa-K5At-pA8E-Q9hH-GUI63l --norestorefile /dev/sdb
WARNING: Inconsistent metadata found for VG vg00 - updating to use version 16
WARNING: Repairing Physical Volume /dev/sde that is in Volume Group vg00 but not marked as used.
WARNING: Inconsistent metadata found for VG vg00 - updating to use version 17
Wiping signatures on new PV /dev/sdb.
Set up physical volume for "/dev/sdb" with 524288000 available sectors.
Zeroing start of device /dev/sdb.
Writing physical volume data to disk "/dev/sdb".
Physical volume "/dev/sdb" successfully created.
+++ component_created pv:/dev/sdb lvmdev
+++ local device=pv:/dev/sdb
+++ local type=lvmdev
+++ local touchfile=lvmdev-pv:-dev-sdb
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmdev-pv:-dev-sdb
+++ create_component /dev/vg02 lvmgrp
+++ local device=/dev/vg02
+++ local type=lvmgrp
+++ local touchfile=lvmgrp--dev-vg02
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmgrp--dev-vg02 ']'
+++ return 0
+++ create_volume_group=1
+++ create_logical_volumes=1
+++ create_thin_volumes_only=0
+++ '[' 1 -eq 1 ']'
+++ LogPrint 'Creating LVM VG '\''vg02'\''; Warning: some properties may not be preserved...'
+++ Log 'Creating LVM VG '\''vg02'\''; Warning: some properties may not be preserved...'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.460164062 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.460164062 Creating LVM VG '\''vg02'\''; Warning: some properties may not be preserved...'
2020-12-17 08:22:27.460164062 Creating LVM VG 'vg02'; Warning: some properties may not be preserved...
+++ Print 'Creating LVM VG '\''vg02'\''; Warning: some properties may not be preserved...'
+++ test 1
+++ echo -e 'Creating LVM VG '\''vg02'\''; Warning: some properties may not be preserved...'
+++ '[' -e /dev/vg02 ']'
+++ lvm vgcreate --physicalextentsize 4096k vg02 /dev/sdc
Volume group "vg02" successfully created
+++ lvm vgchange --available y vg02
0 logical volume(s) in volume group "vg02" now active
+++ component_created /dev/vg02 lvmgrp
+++ local device=/dev/vg02
+++ local type=lvmgrp
+++ local touchfile=lvmgrp--dev-vg02
+++ touch /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmgrp--dev-vg02
+++ create_component /dev/vg00 lvmgrp
+++ local device=/dev/vg00
+++ local type=lvmgrp
+++ local touchfile=lvmgrp--dev-vg00
+++ '[' -e /tmp/rear.z4FzZYUX19es1s0/tmp/touch/lvmgrp--dev-vg00 ']'
+++ return 0
+++ create_volume_group=1
+++ create_logical_volumes=1
+++ create_thin_volumes_only=0
+++ '[' 1 -eq 1 ']'
+++ LogPrint 'Creating LVM VG '\''vg00'\''; Warning: some properties may not be preserved...'
+++ Log 'Creating LVM VG '\''vg00'\''; Warning: some properties may not be preserved...'
++++ date '+%Y-%m-%d %H:%M:%S.%N '
+++ local 'timestamp=2020-12-17 08:22:27.508077346 '
+++ test 1 -gt 0
+++ echo '2020-12-17 08:22:27.508077346 Creating LVM VG '\''vg00'\''; Warning: some properties may not be preserved...'
2020-12-17 08:22:27.508077346 Creating LVM VG 'vg00'; Warning: some properties may not be preserved...
+++ Print 'Creating LVM VG '\''vg00'\''; Warning: some properties may not be preserved...'
+++ test 1
+++ echo -e 'Creating LVM VG '\''vg00'\''; Warning: some properties may not be preserved...'
+++ '[' -e /dev/vg00 ']'
+++ lvm vgcreate --physicalextentsize 4096k vg00 /dev/sda2 /dev/sde
A volume group called vg00 already exists.
2020-12-17 08:22:27.527900614 UserInput: called in /usr/share/rear/layout/recreate/default/200_run_layout_code.sh line 127
2020-12-17 08:22:27.531570832 UserInput: Default input in choices - using choice number 1 as default input
2020-12-17 08:22:27.533360057 The disk layout recreation script failed
2020-12-17 08:22:27.535022182 1) Rerun disk recreation script (/var/lib/rear/layout/diskrestore.sh)
2020-12-17 08:22:27.536610048 2) View 'rear recover' log file (/var/log/rear/rear-ITSGBHHLSP01629.log)
2020-12-17 08:22:27.538240312 3) Edit disk recreation script (/var/lib/rear/layout/diskrestore.sh)
2020-12-17 08:22:27.539768723 4) View original disk space usage (/var/lib/rear/layout/config/df.txt)
2020-12-17 08:22:27.541294158 5) Use Relax-and-Recover shell and return back to here
2020-12-17 08:22:27.542805871 6) Abort 'rear recover'
2020-12-17 08:22:27.544371541 (default '1' timeout 300 seconds)
2020-12-17 08:23:55.804307675 UserInput: 'read' got as user input '6'
2020-12-17 08:23:55.808397716 Error detected during restore.
2020-12-17 08:23:55.809979301 Restoring saved original /var/lib/rear/layout/disklayout.conf
2020-12-17 08:23:55.813033158 ERROR: User chose to abort 'rear recover' in /usr/share/rear/layout/recreate/default/200_run_layout_code.sh
==== Stack trace ====
Trace 0: /bin/rear:547 main
Trace 1: /usr/share/rear/lib/recover-workflow.sh:33 WORKFLOW_recover
Trace 2: /usr/share/rear/lib/framework-functions.sh:101 SourceStage
Trace 3: /usr/share/rear/lib/framework-functions.sh:49 Source
Trace 4: /usr/share/rear/layout/recreate/default/200_run_layout_code.sh:153 source
Message: User chose to abort 'rear recover' in /usr/share/rear/layout/recreate/default/200_run_layout_code.sh
== End stack trace ==
2020-12-17 08:23:55.819401548 Exiting rear recover (PID 747) and its descendant processes
2020-12-17 08:23:56.832531838 rear,747 /bin/rear -v recover
`-rear,3734 /bin/rear -v recover
`-pstree,3735 -Aplau 747
/usr/share/rear/lib/_input-output-functions.sh: line 157: kill: (3738) - No such process
2020-12-17 08:23:56.851647881 Running exit tasks
2020-12-17 08:23:56.857821198 Finished in 137 seconds
2020-12-17 08:23:56.859348447 Removing build area /tmp/rear.z4FzZYUX19es1s0
removed directory: '/tmp/rear.z4FzZYUX19es1s0'
2020-12-17 08:23:56.866374573 End of program reached
RESCUE ITSGBHHLSP01629:~ #
RESCUE ITSGBHHLSP01629:~ # reboot
umounting all filesystems
/var/lib/nfs/rpc_pipefs : ignored
/sys/fs/cgroup/hugetlb : successfully umounted
/sys/fs/cgroup/perf_event: successfully umounted
/sys/fs/cgroup/memory : successfully umounted
/sys/fs/cgroup/pids : successfully umounted
/sys/fs/cgroup/net_cls,net_prio: successfully umounted
/sys/fs/cgroup/cpuset : successfully umounted
/sys/fs/cgroup/cpu,cpuacct: successfully umounted
/sys/fs/cgroup/freezer : successfully umounted
/sys/fs/cgroup/blkio : successfully umounted
/sys/fs/cgroup/devices : successfully umounted
/sys/fs/pstore : successfully umounted
umount: /sys/fs/cgroup/systemd: not mounted
/sys/fs/cgroup/systemd : successfully umounted
/sys/fs/cgroup : successfully umounted
umount: /run: target is busy.
(In some cases useful info about processes that use
the device is found by lsof(8) or fuser(1))
/run : successfully umounted
/dev/pts : ignored
/dev/shm : successfully umounted
/sys/kernel/security : successfully umounted
umount: /dev: target is busy.
(In some cases useful info about processes that use
the device is found by lsof(8) or fuser(1))
/dev : successfully umounted
/proc : ignored
/sys : ignored
umount: /: not mounted
/ : successfully umounted
syncing disks... waiting 3 seconds before reboot
RESCUE ITSGBHHLSP01629:~ #
Using username "root".
Pre-authentication banner message from server:
|
|
| Relax-and-Recover 2.4 / Git
|
| Relax-and-Recover comes with ABSOLUTELY NO WARRANTY; for details see
| the GNU General Public License at: http://www.gnu.org/licenses/gpl.html
|
| Host ITSGBHHLSP01629 using Backup NETFS and Output ISO
| Build date: Tue, 15 Dec 2020 16:43:07 +0100
|
|
| #############################################################################
| # #
| # WARNING NOTICE: This system is restricted solely to Johnson & Johnson #
| # users for authorized business only. Any actual or attempted unauthorized #
| # access, use or modification of this system is strictly prohibited by #
| # Johnson & Johnson. Unauthorized users are subject to Johnson & Johnson #
| # disciplinary proceedings and/or criminal and civil penalties under state, #
| # federal or other applicable domestic and foreign laws. The use of this #
| # system may be monitored and recorded for administrative and security #
| # reasons. If such monitoring and/or recording reveal possible evidence of #
| # criminal activity, Johnson & Johnson may provide the evidence of such #
| # monitoring to law enforcement officials. #
| # #
| #############################################################################
|
| SSH fingerprint: 2048 SHA256:lwc9mFkFzUSYWzFIWyEveqfLjsut5dmxSYcl5/n8Dpg root
> @ITSGBHHLSP01629 (RSA)
|
End of banner message from server
Server refused our key
root@10.180.4.48's password:
Welcome to Relax-and-Recover. Run "rear recover" to restore your system !
RESCUE ITSGBHHLSP01629:~ # pvs
PV VG Fmt Attr PSize PFree
/dev/sda2 vg00 lvm2 a-- <44.50g 300.00m
/dev/sdb lvm2 --- 250.00g 250.00g
/dev/sdc vg02 lvm2 a-- <100.00g <100.00g
/dev/sde vg00 lvm2 a-- <10.00g <7.00g
RESCUE ITSGBHHLSP01629:~ # lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lv_audit vg00 -wi------- 4.00g
lv_home vg00 -wi------- 4.00g
lv_log vg00 -wi------- 4.00g
lv_openv vg00 -wi------- 5.20g
lv_opt vg00 -wi------- 5.00g
lv_root vg00 -wi------- 8.00g
lv_tanium vg00 -wi------- 3.00g
lv_tmp vg00 -wi------- 2.00g
lv_var vg00 -wi------- 8.00g
swap vg00 -wi------- 4.00g
RESCUE ITSGBHHLSP01629:~ # vgs
VG #PV #LV #SN Attr VSize VFree
vg00 2 10 0 wz--n- 54.49g <7.29g
vg02 1 0 0 wz--n- <100.00g <100.00g
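A minimal sketch, not part of the recorded session, of one way the stale vg00/vg02 metadata shown by the pvs/lvs/vgs output above could be cleared from the ReaR shell before rerunning /var/lib/rear/layout/diskrestore.sh (the recreation failed on 'A volume group called vg00 already exists' because only /dev/sda was wiped, so old vg00 metadata survived on /dev/sde); the VG and PV names below are taken from the output above:

# Deactivate and drop the leftover volume groups that survived on the
# disks that were not wiped, then clear the old PV labels so the
# pvcreate/vgcreate calls in diskrestore.sh start from clean devices.
for vg in vg00 vg01 vg02; do
    lvm vgchange -a n "$vg" 2>/dev/null
    lvm vgremove -f "$vg" 2>/dev/null
done
for pv in /dev/sda2 /dev/sdb /dev/sdc /dev/sde; do
    lvm pvremove -ff -y "$pv" 2>/dev/null
done

If wipefs is available in the rescue system, 'wipefs -a' on each affected device is an alternative; either way this is a manual workaround, not something ReaR 2.4 did automatically in this run.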