Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
Mounting a RAID drive on my Netgear ReadyNAS
mount
fsck /dev/sdb1
parted /dev/sda 'print'
lsblk -f
blkid
mount /dev/sdb1 ~/mountpoint
mdadm --assemble --run /dev/md0 /dev/sdb1
mdadm --assemble --run /dev/md3 /dev/sdb3
mount /dev/md3 ~/old-disk/
mdadm --detail /dev/md127
# mount
/dev/md0 on / type ext4 (rw,noatime,nodiratime,data=ordered)
/dev/md127 on /data type btrfs (rw,noatime,nodiratime,nodatasum,nospace_cache,subvolid=5,subvol=/)
/dev/md0 on /run/nfs4/home type ext4 (rw,noatime,nodiratime,data=ordered)
...
# lsblk -f
NAME FSTYPE LABEL UUID MOUNTPOINT
sda
├─sda1 linux_raid_member 2fe53b89:0 7f32aec1-c2b2-1276-5cf8-c984fa945b56
│ └─md0 ext4 2fe53b89:root ccb2bb10-faf0-4f33-9b05-79232317bfb0 /
├─sda2 linux_raid_member 2fe53b89:1 953dd93b-7743-3d27-371b-b09858f73957
│ └─md1 swap swap 6b2e6fb6-6d63-44c0-a340-73a81b78bdbd [SWAP]
└─sda3 linux_raid_member 2fe53b89:data-0 5511f0ec-9950-a668-9a4e-f5864fab40fb
└─md127 btrfs 2fe53b89:data 6fb18eb7-d6f8-49ae-a8bc-8d16c193fbcd /data
sdb
├─sdb1 linux_raid_member 2fe53b89:0 7f32aec1-c2b2-1276-5cf8-c984fa945b56
│ └─md0 ext4 2fe53b89:root ccb2bb10-faf0-4f33-9b05-79232317bfb0 /
├─sdb2 linux_raid_member 2fe53b89:1 953dd93b-7743-3d27-371b-b09858f73957
│ └─md1 swap swap 6b2e6fb6-6d63-44c0-a340-73a81b78bdbd [SWAP]
└─sdb3 linux_raid_member 2fe53b89:data-0 5511f0ec-9950-a668-9a4e-f5864fab40fb
└─md127 btrfs 2fe53b89:data 6fb18eb7-d6f8-49ae-a8bc-8d16c193fbcd /data
root@nas:~# blkid
/dev/md/data-0: LABEL="2fe53b89:data" UUID="6fb18eb7-d6f8-49ae-a8bc-8d16c193fbcd" UUID_SUB="d1c3b76c-683d-4086-b9b3-2df2d15fcbc7" TYPE="btrfs"
/dev/md0: LABEL="2fe53b89:root" UUID="ccb2bb10-faf0-4f33-9b05-79232317bfb0" TYPE="ext4"
/dev/md1: LABEL="swap" UUID="6b2e6fb6-6d63-44c0-a340-73a81b78bdbd" TYPE="swap"
/dev/sda1: UUID="7f32aec1-c2b2-1276-5cf8-c984fa945b56" UUID_SUB="effb7efd-62d6-7031-9ac7-1d7bc7c846b6" LABEL="2fe53b89:0" TYPE="linux_raid_member" PARTUUID="11ab9914-6c03-427e-b679-e14150f4c305"
/dev/sda2: UUID="953dd93b-7743-3d27-371b-b09858f73957" UUID_SUB="1d830e0a-eada-1288-5866-e324df77e173" LABEL="2fe53b89:1" TYPE="linux_raid_member" PARTUUID="d76ed9f3-64e6-4bed-9dfe-607199afd703"
/dev/sda3: UUID="5511f0ec-9950-a668-9a4e-f5864fab40fb" UUID_SUB="57bab088-6143-fa3f-3297-727c730c4388" LABEL="2fe53b89:data-0" TYPE="linux_raid_member" PARTUUID="e701e06c-999f-4f05-983e-9dc487f18fc8"
/dev/sdb1: UUID="7f32aec1-c2b2-1276-5cf8-c984fa945b56" UUID_SUB="0c863e5f-2aea-736c-31d9-5d3384c9457c" LABEL="2fe53b89:0" TYPE="linux_raid_member" PARTUUID="ae491ae6-3f16-4139-97be-6cf3cf42eca3"
/dev/sdb2: UUID="953dd93b-7743-3d27-371b-b09858f73957" UUID_SUB="c1b76bc0-6538-d2be-a6f4-0a17df7b6323" LABEL="2fe53b89:1" TYPE="linux_raid_member" PARTUUID="ddfa97de-6c5c-42c2-ac8a-b4271ae106a8"
/dev/sdb3: UUID="5511f0ec-9950-a668-9a4e-f5864fab40fb" UUID_SUB="01978edd-b383-2b45-7e42-5d34f779e5f3" LABEL="2fe53b89:data-0" TYPE="linux_raid_member" PARTUUID="5e7fe2d5-fd9d-4242-ab04-1dc8528f76d2"
/dev/md127: LABEL="2fe53b89:data" UUID="6fb18eb7-d6f8-49ae-a8bc-8d16c193fbcd" UUID_SUB="d1c3b76c-683d-4086-b9b3-2df2d15fcbc7" TYPE="btrfs"
/dev/ubi0_0: UUID="54c651e0-a819-4cc7-9b81-8b977fbb046d" TYPE="ubifs"
root@nas:~# umount /data
root@nas:~# mount
udev on /dev type devtmpfs (rw,noatime,nodiratime,size=10240k,nr_inodes=63129,mode=755)
/dev/md0 on / type ext4 (rw,noatime,nodiratime,data=ordered)
root@nas:~# mdadm --detail /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Thu Jan 9 21:36:33 2020
Raid Level : raid1
Array Size : 4190208 (4.00 GiB 4.29 GB)
Used Dev Size : 4190208 (4.00 GiB 4.29 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sat Jan 25 18:35:22 2020
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:0 (local to host 2fe53b89)
UUID : 7f32aec1:c2b21276:5cf8c984:fa945b56
Events : 653
Number Major Minor RaidDevice State
0 8 1 0 active sync /dev/sda1
2 8 17 1 active sync /dev/sdb1
root@nas:~# mdadm --detail /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Sat Jan 25 11:18:45 2020
Raid Level : raid1
Array Size : 523264 (511.00 MiB 535.82 MB)
Used Dev Size : 523264 (511.00 MiB 535.82 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sat Jan 25 11:22:34 2020
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:1 (local to host 2fe53b89)
UUID : e2b7b0c7:50e08865:8bb4c199:fed6e36e
Events : 35
Number Major Minor RaidDevice State
0 8 2 0 active sync /dev/sda2
1 8 18 1 active sync /dev/sdb2
root@nas:~# mdadm --detail /dev/md127
/dev/md127:
Version : 1.2
Creation Time : Thu Jan 9 21:37:23 2020
Raid Level : raid1
Array Size : 1948664832 (1858.39 GiB 1995.43 GB)
Used Dev Size : 1948664832 (1858.39 GiB 1995.43 GB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Sat Jan 25 18:32:11 2020
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:data-0 (local to host 2fe53b89)
UUID : 5511f0ec:9950a668:9a4ef586:4fab40fb
Events : 707
Number Major Minor RaidDevice State
0 8 3 0 active sync /dev/sda3
2 8 19 1 active sync /dev/sdb3
root@nas:~# fdisk -l
Disk /dev/mtdblock0: 1.5 MiB, 1572864 bytes, 3072 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock1: 512 KiB, 524288 bytes, 1024 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock2: 6 MiB, 6291456 bytes, 12288 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock3: 4 MiB, 4194304 bytes, 8192 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock4: 116 MiB, 121634816 bytes, 237568 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/sda: 1.8 TiB, 2000398934016 bytes, 3907029168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
Disk identifier: 80EAF84B-14A4-4E63-AD8E-26C98B92CA81
Device Start End Sectors Size Type
/dev/sda1 64 8388671 8388608 4G Linux RAID
/dev/sda2 8388672 9437247 1048576 512M Linux RAID
/dev/sda3 9437248 3907029119 3897591872 1.8T Linux RAID
Disk /dev/sdb: 1.8 TiB, 2000398934016 bytes, 3907029168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
Disk identifier: 14D89F7A-C877-428C-B35F-5A9F9728FA94
Device Start End Sectors Size Type
/dev/sdb1 64 8388671 8388608 4G Linux RAID
/dev/sdb2 8388672 9437247 1048576 512M Linux RAID
/dev/sdb3 9437248 3907029119 3897591872 1.8T Linux RAID
Disk /dev/md0: 4 GiB, 4290772992 bytes, 8380416 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disk /dev/md1: 511 MiB, 535822336 bytes, 1046528 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disk /dev/md127: 1.8 TiB, 1995432787968 bytes, 3897329664 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
# cat /etc/mdadm/mdadm.conf
CREATE owner=root group=disk mode=0660 auto=yes
# cat /etc/fstab
LABEL=2fe53b89:data /data btrfs defaults,nodatasum 0 0
root@nas:~# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid10] [raid6] [raid5] [raid4]
md1 : active raid1 sda2[0] sdb2[1]
523264 blocks super 1.2 [2/2] [UU]
md0 : active raid1 sda1[0] sdb1[2]
4190208 blocks super 1.2 [2/2] [UU]
unused devices: <none>
# umount /dev/md127
# mdadm --stop /dev/md127
mdadm: stopped /dev/md127
==================
root@nas:~# fdisk -l
Disk /dev/mtdblock0: 1.5 MiB, 1572864 bytes, 3072 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock1: 512 KiB, 524288 bytes, 1024 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock2: 6 MiB, 6291456 bytes, 12288 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock3: 4 MiB, 4194304 bytes, 8192 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mtdblock4: 116 MiB, 121634816 bytes, 237568 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/sda: 1.8 TiB, 2000398934016 bytes, 3907029168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
Disk identifier: 4BE2BEAB-B9F9-4D61-BE64-78EFA9FF6104
Device Start End Sectors Size Type
/dev/sda1 64 8388671 8388608 4G Linux RAID
/dev/sda2 8388672 9437247 1048576 512M Linux RAID
/dev/sda3 9437248 3907029119 3897591872 1.8T Linux RAID
Disk /dev/sdb: 1.8 TiB, 2000398934016 bytes, 3907029168 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disklabel type: gpt
Disk identifier: 80EAF84B-14A4-4E63-AD8E-26C98B92CA81
Device Start End Sectors Size Type
/dev/sdb1 64 8388671 8388608 4G Linux RAID
/dev/sdb2 8388672 9437247 1048576 512M Linux RAID
/dev/sdb3 9437248 3907029119 3897591872 1.8T Linux RAID
Disk /dev/md0: 4 GiB, 4290772992 bytes, 8380416 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disk /dev/md1: 511.4 MiB, 536281088 bytes, 1047424 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
Disk /dev/md127: 1.8 TiB, 1995432787968 bytes, 3897329664 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
root@nas:~# mount
/dev/md0 on / type ext4 (rw,noatime,nodiratime,data=ordered)
/dev/md127 on /data type btrfs (rw,noatime,nodiratime,nodatasum,nospace_cache,subvolid=5,subvol=/)
/dev/md127 on /apps type btrfs (rw,noatime,nodiratime,nodatasum,nospace_cache,subvolid=258,subvol=/.apps)
/dev/md127 on /home type btrfs (rw,noatime,nodiratime,nodatasum,nospace_cache,subvolid=257,subvol=/home)
/dev/md127 on /run/nfs4/home type btrfs (rw,noatime,nodiratime,nodatasum,nospace_cache,subvolid=257,subvol=/home)
root@nas:~# lsblk -f
NAME FSTYPE LABEL UUID MOUNTPOINT
sda
├─sda1 linux_raid_member 2fe53b89:0 b4909fb8-68f2-7174-0c0f-ebf67ebe238e
│ └─md0 ext4 2fe53b89:root e0b8f44a-9778-4808-915e-b686596ed4a5 /
├─sda2 linux_raid_member 2fe53b89:1 2cfe8545-64e4-f022-7b8e-c27f924f3cbe
│ └─md1 swap swap 4ea2dd0f-0d48-4219-a93a-a17a417cab3a [SWAP]
└─sda3 linux_raid_member 2fe53b89:data-0 ed736fbf-23bf-d56c-c1f1-06601dcc979c
└─md127 btrfs 2fe53b89:data 4189dcbb-beeb-4745-928e-dbd99aa10412 /data
sdb
├─sdb1 linux_raid_member 2fe53b89:0 7f32aec1-c2b2-1276-5cf8-c984fa945b56
├─sdb2 linux_raid_member 2fe53b89:1 1bb6eb6f-6b3d-906d-104c-1772b0fc873c
└─sdb3 linux_raid_member 2fe53b89:data-0 5511f0ec-9950-a668-9a4e-f5864fab40fb
root@nas:~# blkid
/dev/md/data-0: LABEL="2fe53b89:data" UUID="4189dcbb-beeb-4745-928e-dbd99aa10412" UUID_SUB="fbd8c099-3563-4a41-9c4e-60e4ec41049c" TYPE="btrfs"
/dev/sda1: UUID="b4909fb8-68f2-7174-0c0f-ebf67ebe238e" UUID_SUB="6f0e0a91-5df1-b745-7e9c-1c0f9787086e" LABEL="2fe53b89:0" TYPE="linux_raid_member" PARTUUID="8ebf945a-af4f-4cbf-a905-4d806ad89d5a"
/dev/md0: LABEL="2fe53b89:root" UUID="e0b8f44a-9778-4808-915e-b686596ed4a5" TYPE="ext4"
/dev/sda2: UUID="2cfe8545-64e4-f022-7b8e-c27f924f3cbe" UUID_SUB="e8862131-f39b-783a-6f11-5f1c8f42f297" LABEL="2fe53b89:1" TYPE="linux_raid_member" PARTUUID="23e2a140-38c2-4b6d-920c-381015767173"
/dev/md1: LABEL="swap" UUID="4ea2dd0f-0d48-4219-a93a-a17a417cab3a" TYPE="swap"
/dev/sda3: UUID="ed736fbf-23bf-d56c-c1f1-06601dcc979c" UUID_SUB="9a70dc9e-8ba5-ba6c-1450-f0fff5e0f372" LABEL="2fe53b89:data-0" TYPE="linux_raid_member" PARTUUID="03fa0d46-db7b-401c-83f8-847a9c841cd5"
/dev/md127: LABEL="2fe53b89:data" UUID="4189dcbb-beeb-4745-928e-dbd99aa10412" UUID_SUB="fbd8c099-3563-4a41-9c4e-60e4ec41049c" TYPE="btrfs"
/dev/sdb1: UUID="7f32aec1-c2b2-1276-5cf8-c984fa945b56" UUID_SUB="effb7efd-62d6-7031-9ac7-1d7bc7c846b6" LABEL="2fe53b89:0" TYPE="linux_raid_member" PARTUUID="11ab9914-6c03-427e-b679-e14150f4c305"
/dev/sdb2: UUID="1bb6eb6f-6b3d-906d-104c-1772b0fc873c" UUID_SUB="4613e0c9-3e01-a0e2-1b68-6207ab5ca624" LABEL="2fe53b89:1" TYPE="linux_raid_member" PARTUUID="d76ed9f3-64e6-4bed-9dfe-607199afd703"
/dev/sdb3: UUID="5511f0ec-9950-a668-9a4e-f5864fab40fb" UUID_SUB="57bab088-6143-fa3f-3297-727c730c4388" LABEL="2fe53b89:data-0" TYPE="linux_raid_member" PARTUUID="e701e06c-999f-4f05-983e-9dc487f18fc8"
/dev/ubi0_0: UUID="54c651e0-a819-4cc7-9b81-8b977fbb046d" TYPE="ubifs"
# mdadm --detail /dev/md0
/dev/md0:
Version : 1.2
Creation Time : Sun Jan 26 16:53:29 2020
Raid Level : raid1
Array Size : 4190208 (4.00 GiB 4.29 GB)
Used Dev Size : 4190208 (4.00 GiB 4.29 GB)
Raid Devices : 1
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Sun Jan 26 17:19:59 2020
State : clean
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:0 (local to host 2fe53b89)
UUID : b4909fb8:68f27174:0c0febf6:7ebe238e
Events : 2
Number Major Minor RaidDevice State
0 8 1 0 active sync /dev/sda1
root@nas:~# mdadm --detail /dev/md1
/dev/md1:
Version : 1.2
Creation Time : Sun Jan 26 16:53:29 2020
Raid Level : raid1
Array Size : 523712 (511.44 MiB 536.28 MB)
Used Dev Size : 523712 (511.44 MiB 536.28 MB)
Raid Devices : 1
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Sun Jan 26 16:53:29 2020
State : clean
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:1 (local to host 2fe53b89)
UUID : 2cfe8545:64e4f022:7b8ec27f:924f3cbe
Events : 2
Number Major Minor RaidDevice State
0 8 2 0 active sync /dev/sda2
root@nas:~# mdadm --detail /dev/md127
/dev/md127:
Version : 1.2
Creation Time : Sun Jan 26 16:54:20 2020
Raid Level : raid1
Array Size : 1948664832 (1858.39 GiB 1995.43 GB)
Used Dev Size : 1948664832 (1858.39 GiB 1995.43 GB)
Raid Devices : 1
Total Devices : 1
Persistence : Superblock is persistent
Update Time : Sun Jan 26 17:17:12 2020
State : clean
Active Devices : 1
Working Devices : 1
Failed Devices : 0
Spare Devices : 0
Consistency Policy : unknown
Name : 2fe53b89:data-0 (local to host 2fe53b89)
UUID : ed736fbf:23bfd56c:c1f10660:1dcc979c
Events : 4
Number Major Minor RaidDevice State
0 8 3 0 active sync /dev/sda3
root@nas:~# mdadm --examine /dev/sdb3
/dev/sdb3:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x0
Array UUID : 5511f0ec:9950a668:9a4ef586:4fab40fb
Name : 2fe53b89:data-0 (local to host 2fe53b89)
Creation Time : Thu Jan 9 21:37:23 2020
Raid Level : raid1
Raid Devices : 2
Avail Dev Size : 3897329728 (1858.39 GiB 1995.43 GB)
Array Size : 1948664832 (1858.39 GiB 1995.43 GB)
Used Dev Size : 3897329664 (1858.39 GiB 1995.43 GB)
Data Offset : 262144 sectors
Super Offset : 8 sectors
Unused Space : before=262056 sectors, after=64 sectors
State : clean
Device UUID : 57bab088:6143fa3f:3297727c:730c4388
Update Time : Sun Jan 26 16:40:50 2020
Bad Block Log : 512 entries available at offset 72 sectors
Checksum : be311806 - correct
Events : 723
Device Role : Active device 0
Array State : A. ('A' == active, '.' == missing, 'R' == replacing)
root@nas:~# mdadm --examine /dev/sda3
/dev/sda3:
Magic : a92b4efc
Version : 1.2
Feature Map : 0x0
Array UUID : ed736fbf:23bfd56c:c1f10660:1dcc979c
Name : 2fe53b89:data-0 (local to host 2fe53b89)
Creation Time : Sun Jan 26 16:54:20 2020
Raid Level : raid1
Raid Devices : 1
Avail Dev Size : 3897329728 (1858.39 GiB 1995.43 GB)
Array Size : 1948664832 (1858.39 GiB 1995.43 GB)
Used Dev Size : 3897329664 (1858.39 GiB 1995.43 GB)
Data Offset : 262144 sectors
Super Offset : 8 sectors
Unused Space : before=262056 sectors, after=64 sectors
State : clean
Device UUID : 9a70dc9e:8ba5ba6c:1450f0ff:f5e0f372
Update Time : Sun Jan 26 17:42:07 2020
Bad Block Log : 512 entries available at offset 72 sectors
Checksum : c07985a4 - correct
Events : 4
Device Role : Active device 0
Array State : A ('A' == active, '.' == missing, 'R' == replacing)
----
I needed to create an md virtual device using mdadm:
$ sudo mdadm -A -R /dev/md9 /dev/sdd4
mdadm: /dev/md9 has been started with 1 drive (out of 2).
So it could be mounted without hassle:
$ sudo mount /dev/md9 /mnt/old_hdd/
$ mount | grep ^/dev/md9
/dev/md9 on /mnt/old_hdd type ext4 (rw)
After the data was moved, I unmounted the file system and removed the md virtual device.
$ sudo umount /mnt/old_hdd
$ sudo mdadm -S /dev/md9
mdadm: stopped /dev/md9
----
Usage: mdadm --create device options...
Create a new array from unused devices.
mdadm --assemble device options...
Assemble a previously created array.
mdadm --build device options...
Create or assemble an array without metadata.
mdadm --manage device options...
make changes to an existing array.
mdadm --misc options... devices
report on or modify various md related devices.
mdadm --grow options device
resize/reshape an active array
mdadm --incremental device
add/remove a device to/from an array as appropriate
mdadm --monitor options...
Monitor one or more array for significant changes.
mdadm device options...
Shorthand for --manage.
Any parameter that does not start with '-' is treated as a device name
or, for --examine-bitmap, a file name.
The first such name is often the name of an md device. Subsequent
names are often names of component devices.
--
# mdadm --assemble --help
Usage: mdadm --assemble device options...
mdadm --assemble --scan options...
This usage assembles one or more raid arrays from pre-existing
components.
For each array, mdadm needs to know the md device, the identity of
the array, and a number of sub devices. These can be found in a number
of ways.
The md device is given on the command line, is found listed in the
config file, or can be deduced from the array identity.
The array identity is determined either from the --uuid, --name, or
--super-minor commandline arguments, from the config file,
or from the first component device on the command line.
The different combinations of these are as follows:
If the --scan option is not given, then only devices and identities
listed on the command line are considered.
The first device will be the array device, and the remainder will be
examined when looking for components.
If an explicit identity is given with --uuid or --super-minor, then
only devices with a superblock which matches that identity is considered,
otherwise every device listed is considered.
If the --scan option is given, and no devices are listed, then
every array listed in the config file is considered for assembly.
The identity of candidate devices are determined from the config file.
After these arrays are assembled, mdadm will look for other devices
that could form further arrays and tries to assemble them. This can
be disabled using the 'AUTO' option in the config file.
If the --scan option is given as well as one or more devices, then
Those devices are md devices that are to be assembled. Their identity
and components are determined from the config file.
If mdadm can not find all of the components for an array, it will assemble
it but not activate it unless --run or --scan is given. To preserve this
behaviour even with --scan, add --no-degraded. Note that "all of the
components" means as many as were present the last time the array was running
as recorded in the superblock. If the array was already degraded, and
the missing device is not a new problem, it will still be assembled. It
is only newly missing devices that cause the array not to be started.
Options that are valid with --assemble (-A) are:
--bitmap= : bitmap file to use with the array
--uuid= -u : uuid of array to assemble. Devices which don't
have this uuid are excluded
--super-minor= -m : minor number to look for in super-block when
choosing devices to use.
--name= -N : Array name to look for in super-block.
--config= -c : config file
--scan -s : scan config file for missing information
--run -R : Try to start the array even if not enough devices
for a full array are present
--force -f : Assemble the array even if some superblocks appear
: out-of-date. This involves modifying the superblocks.
--update= -U : Update superblock: try '-A --update=?' for option list.
--no-degraded : Assemble but do not start degraded arrays.
--readonly -o : Mark the array as read-only. No resync will start.
----
mdadm --assemble --readonly /dev/md8 /dev/sdb3
mount /dev/md8 /mnt/old_disk
# cat /proc/mdstat
Personalities : [raid0] [raid1] [raid10] [raid6] [raid5] [raid4]
md8 : active (read-only) raid1 sdb3[0]
1948664832 blocks super 1.2 [2/1] [U_]
md127 : active raid1 sda3[0]
1948664832 blocks super 1.2 [1/1] [U]
md1 : active raid1 sda2[0]
523712 blocks super 1.2 [1/1] [U]
md0 : active raid1 sda1[0]
4190208 blocks super 1.2 [1/1] [U]
unused devices: <none>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment