LVM: Move the root filesystem to RAID 1 without downtime!
# Move the root filesystem from a single disk to a RAID 1 without downtime
root$ df -h
Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             7.7G     0  7.7G   0% /dev
tmpfs                7.7G     0  7.7G   0% /dev/shm
tmpfs                7.7G  8.8M  7.7G   1% /run
tmpfs                7.7G     0  7.7G   0% /sys/fs/cgroup
/dev/mapper/vg-root  5.2G  1.4G  3.5G  29% /
/dev/sdb1            922M  143M  732M  17% /boot
tmpfs                1.6G     0  1.6G   0% /run/user/0
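# (Optional) Before creating the array, check which disks are still unused.
# lsblk gives a quick overview; device names (/dev/sda, /dev/sdc here) differ per host:
#   lsblk -o NAME,SIZE,TYPE,MOUNTPOINT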
# Create RAID 1: /dev/md0 = (/dev/sda, /dev/sdc)
root$ sudo mdadm --create /dev/md0 --auto md --level=1 --raid-devices=2 /dev/sda /dev/sdc
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
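# Note: the metadata warning above only matters if /boot lives on the array.
# Here /boot stays on /dev/sdb1 (see the df output above), so v1.2 metadata is fine.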
root$ cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 sdc[1] sda[0]
      2930134464 blocks super 1.2 [2/2] [UU]
      [>....................]  resync =  0.2% (6527104/2930134464) finish=395.1min speed=123312K/sec
      bitmap: 22/22 pages [88KB], 65536KB chunk
unused devices: <none>
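# The initial resync runs in the background; to follow its progress (optional):
#   watch -n 60 cat /proc/mdstat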
root$ mdadm --examine /dev/sd[ac]
/dev/sda:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x1
Array UUID : 812f8b4b:eec3b59b:c5f0f680:c525b1aa
Name : n0s0:0 (local to host n0s0)
Creation Time : Thu May 16 01:10:45 2019
Raid Level : raid1
Raid Devices : 2
Avail Dev Size : 5860268976 (2794.39 GiB 3000.46 GB)
Array Size : 2930134464 (2794.39 GiB 3000.46 GB)
Used Dev Size : 5860268928 (2794.39 GiB 3000.46 GB)
Data Offset : 264192 sectors
Super Offset : 8 sectors
Unused Space : before=264112 sectors, after=48 sectors
State : active
Device UUID : 3222008a:8b67b467:5d7a89d0:000b55dc
Internal Bitmap : 8 sectors from superblock
Update Time : Thu May 16 01:12:50 2019
Bad Block Log : 512 entries available at offset 24 sectors
Checksum : ff7a421c - correct
Events : 25
Device Role : Active device 0
Array State : AA ('A' == active, '.' == missing, 'R' == replacing)
/dev/sdc:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x1
Array UUID : 812f8b4b:eec3b59b:c5f0f680:c525b1aa
Name : n0s0:0 (local to host n0s0)
Creation Time : Thu May 16 01:10:45 2019
Raid Level : raid1
Raid Devices : 2
Avail Dev Size : 5860268976 (2794.39 GiB 3000.46 GB)
Array Size : 2930134464 (2794.39 GiB 3000.46 GB)
Used Dev Size : 5860268928 (2794.39 GiB 3000.46 GB)
Data Offset : 264192 sectors
Super Offset : 8 sectors
Unused Space : before=264112 sectors, after=48 sectors
State : active
Device UUID : f42f92c1:8e8c6347:53fee391:8fd6cfa1
Internal Bitmap : 8 sectors from superblock
Update Time : Thu May 16 01:12:50 2019
Bad Block Log : 512 entries available at offset 24 sectors
Checksum : 9d90c467 - correct
Events : 25
Device Role : Active device 1
Array State : AA ('A' == active, '.' == missing, 'R' == replacing)
root$ mdadm --detail /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Thu May 16 01:10:45 2019
Raid Level : raid1
Array Size : 2930134464 (2794.39 GiB 3000.46 GB)
Used Dev Size : 2930134464 (2794.39 GiB 3000.46 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Intent Bitmap : Internal
Update Time : Thu May 16 01:13:35 2019
State : clean, resyncing
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : bitmap
Resync Status : 0% complete
Name : n0s0:0 (local to host n0s0)
UUID : 812f8b4b:eec3b59b:c5f0f680:c525b1aa
Events : 34
    Number   Major   Minor   RaidDevice State
       0       8        0        0      active sync   /dev/sda
       1       8       32        1      active sync   /dev/sdc
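# The array is fully usable while it resyncs, so the LVM steps below can proceed now.
# To instead block until the initial resync completes (optional):
#   mdadm --wait /dev/md0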
# Add the RAID 1 array to the volume group
root$ pvcreate /dev/md0
Physical volume "/dev/md0" successfully created.
root$ pvs
  PV         VG Fmt  Attr PSize   PFree
  /dev/md0      lvm2 ---   <2.73t <2.73t
  /dev/sdb2  vg lvm2 a--  <13.39g      0
root$ vgextend vg /dev/md0
Volume group "vg" successfully extended
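# (Optional) Confirm the volume group now includes /dev/md0 and the added free space:
#   vgs vg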
root$ lvextend -r -L 200GiB vg/root
Size of logical volume vg/root changed from <5.39 GiB (1379 extents) to 200.00 GiB (51200 extents).
Logical volume vg/root successfully resized.
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/mapper/vg-root is mounted on /; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 25
The filesystem on /dev/mapper/vg-root is now 52428800 blocks long.
root$ lvs
  LV   VG Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  root vg -wi-ao---- 200.00g
  swap vg -wi-ao----   8.00g
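# The root LV was grown to a fixed 200 GiB above; to claim all remaining free space
# instead, an equivalent (not run here) call would be:
#   lvextend -r -l +100%FREE vg/root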
root$ df -h
Filesystem           Size  Used Avail Use% Mounted on
devtmpfs             7.7G     0  7.7G   0% /dev
tmpfs                7.7G     0  7.7G   0% /dev/shm
tmpfs                7.7G  8.8M  7.7G   1% /run
tmpfs                7.7G     0  7.7G   0% /sys/fs/cgroup
/dev/mapper/vg-root  197G  1.5G  188G   1% /
/dev/sdb1            922M  143M  732M  17% /boot
tmpfs                1.6G     0  1.6G   0% /run/user/0
# Move the extents from the old device to the new RAID 1
root$ pvmove /dev/sdb2 /dev/md0
/dev/sdb2: Moved: 0.00%
/dev/sdb2: Moved: 8.02%
/dev/sdb2: Moved: 43.95%
/dev/sdb2: Moved: 75.05%
/dev/sdb2: Moved: 100.00%
root$ pvs
  PV         VG Fmt  Attr PSize   PFree
  /dev/md0   vg lvm2 a--   <2.73t  <2.53t
  /dev/sdb2  vg lvm2 a--  <13.39g <13.39g
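# pvmove works through a temporary mirror, so it is safe to interrupt and restartable.
# If the move is stopped (e.g. by a reboot), it can be resumed or cancelled:
#   pvmove           # resume any incomplete moves
#   pvmove --abort   # cancel a move in progress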
# Remove the old device
root$ vgreduce vg /dev/sdb2
Removed "/dev/sdb2" from volume group "vg"
root$ pvremove /dev/sdb2
Labels on physical volume "/dev/sdb2" successfully wiped.
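# (Optional) /dev/sdb2 is now unused; to clear any leftover signatures before
# repurposing it (destructive, double-check the device name first):
#   wipefs -a /dev/sdb2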
# Save the RAID configuration so the array is assembled at boot
root$ mdadm --detail --scan --verbose >> /etc/mdadm.conf
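# Note: on some distributions the file is /etc/mdadm/mdadm.conf instead, and the
# initramfs usually needs a rebuild so the array is assembled early at boot, e.g.:
#   dracut -f               # RHEL/CentOS
#   update-initramfs -u     # Debian/Ubuntu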