Skip to content

Instantly share code, notes, and snippets.

@mz0
Last active April 18, 2019 14:14
Show Gist options
  • Save mz0/37044fee1e34977f35726abe80d6caf7 to your computer and use it in GitHub Desktop.
ZFS

https://github.com/zfsonlinux/zfs/wiki/Ubuntu-18.04-Root-on-ZFS

2.3 Create the root pool: 2.3a Unencrypted:

Delete GPT copy at end of disk:

# dd if=/dev/zero of=/dev/disk/by-id/ata-TOSHIBA_HDWD130_95FE8EEKS bs=4k seek=732566645 count=1
## Verify:
# dd if=/dev/disk/by-id/ata-TOSHIBA_HDWD130_95FE8EEKS bs=4k skip=732566645 count=1 |od -x

# time zpool create -o ashift=12 \
      -O atime=off -O canmount=off -O compression=lz4 \
      -O xattr=sa -O mountpoint=/ -R /mnt/rpool \
      rpool raidz \
   /dev/disk/by-id/ata-TOSHIBA_HDWD130_95FE8EEKS \
   /dev/disk/by-id/ata-TOSHIBA_HDWD130_683T527AS \
   /dev/disk/by-id/ata-TOSHIBA_HDWD130_683T827AS \
   /dev/disk/by-id/ata-TOSHIBA_HDWD130_58VSRTAAS

   
real	0m3,843s
user	0m0,018s
sys	0m0,017s

# zpool status
  pool: rpool
 state: ONLINE
  scan: none requested
config:

	NAME                               STATE     READ WRITE CKSUM
	rpool                              ONLINE       0     0     0
	  raidz1-0                         ONLINE       0     0     0
	    ata-TOSHIBA_HDWD130_95FE8EEKS  ONLINE       0     0     0
	    ata-TOSHIBA_HDWD130_683T527AS  ONLINE       0     0     0
	    ata-TOSHIBA_HDWD130_683T827AS  ONLINE       0     0     0
	    ata-TOSHIBA_HDWD130_58VSRTAAS  ONLINE       0     0     0

errors: No known data errors

3.1

# zfs create -o canmount=off -o mountpoint=none rpool/ROOT

3.2

# zfs create -o canmount=noauto -o mountpoint=/ rpool/ROOT/ubuntu
# zfs mount rpool/ROOT/ubuntu

3.3

root@q:/# zfs create                 -o setuid=off              rpool/home
root@q:/# zfs create -o mountpoint=/root                        rpool/home/root
root@q:/# zfs create -o canmount=off -o setuid=off  -o exec=off rpool/var
root@q:/# zfs create -o com.sun:auto-snapshot=false             rpool/var/cache
root@q:/# zfs create -o acltype=posixacl -o xattr=sa            rpool/var/log
root@q:/# zfs create                                            rpool/var/spool
root@q:/# zfs create -o com.sun:auto-snapshot=false -o exec=on  rpool/var/tmp

3.4 (LUKS-only -- skip)

3.5

# cd /mnt/rpool/
# mkdir /mnt/backup
# mount /dev/disk/by-id/usb-Seagate_Backup+_Hub_BK_NA8T1D1P-0:0-part2 /mnt/backup/
# tar tvzf /mnt/backup/lxdU18.tgz |less
# tar xzf /mnt/backup/lxdU18.tgz

4.4

# mount --rbind /dev  /mnt/rpool/dev
# mount --rbind /proc /mnt/rpool/proc
# mount --rbind /sys  /mnt/rpool/sys/
# chroot /mnt/rpool/ /bin/bash --login
(chroot) ? # ln -sf /proc/self/mounts /etc/mtab
(chroot) # apt update 
(chroot) # apt install zfs-initramfs  ### installs zfs-initramfs zfs-zed zfsutils-linux etc.
(chroot) # mount /dev/disk/by-id/usb-SanDisk_Cruzer_Fit_4C532000020531123414-0\:0-part1 /mnt/boot/
### ls -l /boot/initrd.img-4.15.0-34-generic /mnt/boot/initrd.img-4.15.0-34-generic 
### 54115891 Sep 14     /boot/initrd.img-4.15.0-34-generic
### 52378994 Sep 12 /mnt/boot/initrd.img-4.15.0-34-generic

4.11

(chroot) # zfs set mountpoint=legacy rpool/var/log
(chroot) # zfs set mountpoint=legacy rpool/var/tmp
(chroot) # cat >> /etc/fstab << EOF
rpool/var/log /var/log zfs defaults 0 0
rpool/var/tmp /var/tmp zfs defaults 0 0
EOF
(chroot) # grub-probe /
zfs
(chroot) # update-initramfs -c -k all
### 54169482 Sep 14 16:38     /boot/initrd.img-4.15.0-33-generic
### 54168180 Sep 14 16:38     /boot/initrd.img-4.15.0-34-generic
### 54115891 Sep 14 16:24 /mnt/boot/initrd.img-4.15.0-34-generic
### 52314526 Aug 24 17:49 /mnt/boot/initrd.img-4.15.0-33-generic
(chroot) # cp -p /boot/initrd.img-4.15.0-34-generic /mnt/boot/initrd.img-4.15.0-34-generic
(chroot) # <edit grub.cfg>

6.2

(chroot) # exit

6.3

# mount | grep -v zfs | tac | awk '/\/mnt/ {print $3}' | xargs umount -l 
# mount | grep -v zfs | tac | awk '/\/mnt/ {print $3}' | xargs -i{} umount -l {}
# cd /
# zpool export rpool
@mz0
Copy link
Author

mz0 commented Sep 27, 2018

apt purge

  • python3-dbus
  • python3-gi
    -- gir1.2-glib-2.0 libgirepository-1.0-1
  • ifenslave
  • libdumbnet1
  • gcc-5-base, gcc-6-base
  • tcpd
  • bcache-tools
  • btrfs-tools btrfs-progs
  • dmeventd (+ liblvm2cmd2.02 lvm2)
  • language-pack-en-base (+ language-pack-en)
  • libatm1
  • ethtool
  • os-prober
  • xauth
    -- libx11-6 libxau6 libxcb1 libxdmcp6 libxext6 libxmuu1
  • libusb-0.1-4
  • byobu
  • ftp
  • cryptsetup-bin
  • mdadm
  • dmsetup

@mz0
Copy link
Author

mz0 commented Mar 26, 2019

https://habr.com/ru/post/439860/ phoinixrw (Сергей Томулевич) 12.02.2019
https://postgres.men/os/linux/ubuntu-18-04-root-on-zfs/ 10.01.2019 Sergey Tomulevich

zfs create -o mountpoint=/ rpool/root
zpool set bootfs=rpool/root rpool
zfs create -o mountpoint=legacy rpool/tmp
zfs create -o mountpoint=legacy rpool/var
zfs create -o mountpoint=/home rpool/home
zfs create -o mountpoint=legacy rpool/home/root  # монтирование п.4.11

cd /mnt/
mkdir var tmp root
mount -t zfs rpool/var /mnt/var/
mount -t zfs rpool/tmp /mnt/tmp/
mount -t zfs rpool/home/root /mnt/root/
debootstrap bionic /mnt

mount --rbind /dev  /mnt/dev
mount --rbind /proc /mnt/proc
mount --rbind /sys  /mnt/sys
chroot /mnt /bin/bash --login

ln -sf /proc/self/mounts /etc/mtab   # -f: debootstrap may have already created /etc/mtab (cf. step 4.4 above)
chmod 1777 /tmp
apt update
dpkg-reconfigure locales

  * en_US.UTF-8
  * ru_RU.UTF-8

dpkg-reconfigure tzdata
apt install --yes --no-install-recommends linux-image-generic
apt install --yes zfs-initramfs
apt install --yes grub-pc
grub-probe /               # 4.8.1. (5.1) Проверяем
update-initramfs -u -k all  # 4.8.2. (5.2) Обновляем initrd

...
# 4.11. Исправление монтирования
cat >> /etc/fstab << EoT
rpool/var        /var   zfs  noatime,nodev 0 0
rpool/tmp        /tmp   zfs  noatime,nodev 0 0
rpool/home/root  /root  zfs  noatime,nodev 0 0
EoT

4.11. Исправление монтирования

монтирование ZFS разделов происходит после старта некоторых демонов (ZFS_INITRD_ADDITIONAL_DATASETS в /etc/default/zfs мы шатали безуспешно), которые пишут в /var и начинают заполнять системные журналы. Когда же настает время монтирования ZFS разделов выясняется что точки монтирования не пустые и смонтировать ничего не получается. Поэтому требуется указать точки монтирования в /etc/fstab так как systemd в первую очередь ориентируется на них при обращении к папке.

@mz0
Copy link
Author

mz0 commented Mar 26, 2019

zfs#3768 /var dataset requires legacy mount on systemd based distros

openzfs/zfs#7329 by @aerusso, merged on Apr 7, 2018.

This PR takes a different approach from #6974 [same author], which modified /etc/fstab to reflect ZFS mountpoints [for systemd]. Here, instead, ZFS mounts are tracked by directly creating native systemd .mount units, at early boot from the output of zfs list -H -t filesystem -oname,mountpoint,canmount. Because pools may not be imported, the output of this command can be saved in /etc/zfs/zfs-list.cache. If the pools are for some reason mounted at early boot (e.g., zfs on root), this file can be omitted and the command will be run.

This generator is not required; it does not interfere with zfs-mount.service, so anything missing from the cache file (or from an unimported pool) will be mounted as before.

As mentioned before, this allows for complex mount hierarchies (e.g., bind mounts that must happen after zfs mounts are made; any other filesystem mounted on top of any ZFS). Notice that ZFS on root users are most likely to want such features, and will not have to create the zfs-list.cache file.

@mz0
Copy link
Author

mz0 commented Mar 26, 2019

ZED (ZFS Event Daemon) monitors events generated by the ZFS kernel module.
When a zevent (ZFS Event) is posted, ZED will run any ZEDLETs that have been enabled for the corresponding zevent class.
An incomplete list of zevent environment variables (ZEVENT_EID, ZEVENT_CLASS, ZEVENT_TIME, etc.) can be found in the man-page:

bionic (8) zed.8.gz (provided by: zfs-zed_0.7.5-1ubuntu15)

cosmic (8) zed.8.gz (provided by: zfs-zed_0.7.9-3ubuntu6)

@mz0
Copy link
Author

mz0 commented Apr 18, 2019

#8200 has this comment: (2018.12.10 @rlaager)
This is really up to @gmelikov, but since it's a wiki and changes are easy to revert, I went ahead and boldly edited this. I see no point to the equal level filesystems approach. Either the filesystems should be at the top-level (as per the HOWTO) because they should not be rolled back with the root filesystem, or they should be a child of the root filesystem (as per the earlier example in the initrd documentation) because you do want them rolled back with the root filesystem but also want them as a separate dataset for some reason. The middle ground of having them under rpool/ROOT seems pointless (and could also confuse a beadm tool).

I also removed the examples of /boot and /var as things to separate under the root dataset: /var because it conflicts with the root-on-ZFS HOWTO, and /boot because that's where GRUB lives and GRUB is earlier in the process here. If someone knows what they are doing, they can modify things, but we shouldn't have attractive nuisances like that.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment