Save digitalsignalperson/0da0cd70ab8c64f32583976cd4bd180b to your computer and use it in GitHub Desktop.
#!/bin/bash
# ZFS native encryption vs. LUKS throughput benchmark, run against a tmpfs-backed pool.
# From https://www.medo64.com/2022/10/native-zfs-encryption-speed-ubuntu-22-10/
# https://docs.google.com/spreadsheets/d/1spTTlEJESnVFSEsB98iVviA-l_unf6Hemx9Nh8KrBBI/edit#gid=1839759404
#
# Tweaks for faster test: only do 2 tests, only sleep 2 seconds, only use aes-256-gcm for zfs
# Tweaks to LUKS config
# don't use sync=always
# use only 1 vdev

ANSI_RESET="$(tput sgr0)"
ANSI_RED="$([ "$(tput colors)" -ge 16 ] && tput setaf 9 || tput setaf 1 bold)"
ANSI_CYAN="$([ "$(tput colors)" -ge 16 ] && tput setaf 14 || tput setaf 6 bold)"

ntests=1   # number of write/read passes per configuration
tsleep=2   # seconds to pause between passes

# Checks: zpool/cryptsetup/mount all need root, and /ramdisk must be free.
if [ "$EUID" -ne 0 ]; then
  echo "${ANSI_RED}Must run as root!${ANSI_RESET}"
  exit 1
fi
if [[ -e "/ramdisk" ]]; then
  echo "${ANSI_RED}/ramdisk already exists!${ANSI_RESET}"
  exit 1
fi

# Setup RAM disk backing store for all test files and vdevs.
mkdir /ramdisk
mount -t tmpfs -o size=42G tmpfs /ramdisk
# Cap the ARC at 1 GiB so ZFS caching doesn't dominate the read numbers.
echo "1073741824" > /sys/module/zfs/parameters/zfs_arc_max

# Create test data (4 GiB of random bytes; incompressible on purpose).
swapoff --all
dd if=/dev/urandom of=/ramdisk/data.bin bs=1M count=4096 &> /dev/null || exit

# Test raw tmpfs as the baseline (no ZFS, no encryption).
echo "${ANSI_CYAN}raw${ANSI_RESET}"
for ((I=1; I<=ntests; I++)); do
  sleep "${tsleep}"
  echo -n "  write $I: "
  dd if=/ramdisk/data.bin of=/ramdisk/raw$I.bin bs=1M |& grep "copied" || exit
done
for ((I=1; I<=ntests; I++)); do
  sleep "${tsleep}"
  echo -n "  read $I: "
  dd if=/ramdisk/raw$I.bin of=/dev/null bs=1M |& grep "copied" || exit
done
rm /ramdisk/raw*.bin

# Test ZFS: plain pool, native encryption, and a pool on top of LUKS.
for ENCRYPTION in \
  "none" \
  "aes-256-gcm" \
  "luks" \
; do
  rm /ramdisk/disk*.bin 2>/dev/null
  # 6 GB file-backed vdev (must exceed the 4 GiB test payload plus pool overhead).
  dd if=/dev/zero of=/ramdisk/disk1.bin bs=1MB count=6144 &> /dev/null || exit
  echo "${ANSI_CYAN}$ENCRYPTION${ANSI_RESET}"
  if [[ "$ENCRYPTION" == "none" ]]; then
    zpool create -o ashift=12 -O normalization=formD \
      -O acltype=posixacl -O xattr=sa -O dnodesize=auto -O atime=off \
      -O compression=off -O mountpoint=/zfs TestPool /ramdisk/disk1.bin || exit
  elif [[ "$ENCRYPTION" == "luks" ]]; then
    # Put the vdev behind dm-crypt, then build an unencrypted pool on the mapping.
    losetup -f /ramdisk/disk1.bin || exit
    DEVS=""
    DISKS="$(losetup -a | grep "/ramdisk/" | cut -d: -f1)"
    for DISK in $DISKS; do
      echo "12345678" | cryptsetup luksFormat -q --cipher aes-xts-plain64 --key-size 512 --use-random --sector-size 4096 --pbkdf argon2id "$DISK"
      echo "12345678" | cryptsetup luksOpen "$DISK" --allow-discards --perf-no_read_workqueue --perf-no_write_workqueue "$(basename "$DISK")"
      DEVS="$DEVS /dev/mapper/$(basename "$DISK") "
    done
    zpool create -o ashift=12 -O normalization=formD \
      -O acltype=posixacl -O xattr=sa -O dnodesize=auto -O atime=off \
      -O compression=off -O mountpoint=/zfs TestPool $DEVS || exit
  else
    # ZFS native encryption; key is fed on stdin via keylocation=prompt.
    echo "12345678" | zpool create -o ashift=12 -O normalization=formD \
      -O acltype=posixacl -O xattr=sa -O dnodesize=auto -O atime=off \
      -O encryption=$ENCRYPTION -O keylocation=prompt -O keyformat=passphrase \
      -O compression=off -O mountpoint=/zfs TestPool /ramdisk/disk1.bin || exit
  fi
  for ((I=1; I<=ntests; I++)); do
    sleep "${tsleep}"
    echo -n "  Write $I: "
    dd if=/ramdisk/data.bin of=/zfs/data$I.bin bs=1M |& grep "copied" || exit
  done
  for ((I=1; I<=ntests; I++)); do
    sleep "${tsleep}"
    echo -n "  read $I: "
    dd if=/zfs/data$I.bin of=/dev/null bs=1M |& grep "copied" || exit
  done
  zpool destroy TestPool || exit
  # Tear down LUKS mappings and loop devices so the next iteration (and the
  # /ramdisk guard on a rerun) starts clean.
  if [[ "$ENCRYPTION" == "luks" ]]; then
    for DEV in $DEVS; do
      cryptsetup luksClose "$DEV" 2>/dev/null
    done
    for DISK in $(losetup -a | grep "/ramdisk/" | cut -d: -f1); do
      losetup -d "$DISK" 2>/dev/null
    done
  fi
done

# Final cleanup: release the tmpfs so the script can be run again.
rm -f /ramdisk/data.bin /ramdisk/disk*.bin
umount /ramdisk
rmdir /ramdisk
re-running the original script with the extra params for LUKS performance
- echo "12345678" | cryptsetup luksFormat -q --cipher aes-xts-plain64 --key-size 512 --pbkdf argon2id $DISK
- echo "12345678" | cryptsetup luksOpen $DISK `basename $DISK`
+ echo "12345678" | cryptsetup luksFormat -q --cipher aes-xts-plain64 --key-size 512 --use-random --sector-size 4096 --pbkdf argon2id $DISK
+ echo "12345678" | cryptsetup luksOpen $DISK --allow-discards --perf-no_read_workqueue --perf-no_write_workqueue `basename $DISK`
output
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.49711 s, 2.9 GB/s
write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.28939 s, 3.3 GB/s
write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.29132 s, 3.3 GB/s
write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.28651 s, 3.3 GB/s
write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.29111 s, 3.3 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.580451 s, 7.4 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.584053 s, 7.4 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.617603 s, 7.0 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.593104 s, 7.2 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.582116 s, 7.4 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.30034 s, 1.9 GB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.4017 s, 1.8 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.35363 s, 1.8 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.23002 s, 1.9 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.20156 s, 2.0 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.46585 s, 2.9 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.47835 s, 2.9 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.47142 s, 2.9 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.47317 s, 2.9 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.48431 s, 2.9 GB/s
aes-128-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.63285 s, 1.6 GB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.74836 s, 1.6 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.62096 s, 1.6 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.49405 s, 1.7 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.52034 s, 1.7 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.93702 s, 2.2 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.84244 s, 2.3 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.82607 s, 2.4 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.84296 s, 2.3 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.83512 s, 2.3 GB/s
aes-192-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.67327 s, 1.6 GB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.68973 s, 1.6 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.49669 s, 1.7 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.49975 s, 1.7 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.53728 s, 1.7 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.83145 s, 2.3 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.82214 s, 2.4 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.81831 s, 2.4 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.84209 s, 2.3 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.8365 s, 2.3 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.65877 s, 1.6 GB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.70704 s, 1.6 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.53687 s, 1.7 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.53156 s, 1.7 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.55942 s, 1.7 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.8428 s, 2.3 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.84638 s, 2.3 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.83718 s, 2.3 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.84033 s, 2.3 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.85016 s, 2.3 GB/s
aes-128-ccm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.4971 s, 260 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.4369 s, 261 MB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.2175 s, 265 MB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.1602 s, 266 MB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.256 s, 264 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.7042 s, 338 MB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.4746 s, 344 MB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.5184 s, 343 MB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.4392 s, 345 MB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.6482 s, 340 MB/s
aes-192-ccm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.6877 s, 257 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.6784 s, 258 MB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.5006 s, 260 MB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.4116 s, 262 MB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.4714 s, 261 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.9456 s, 332 MB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.7604 s, 337 MB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.9152 s, 333 MB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.8389 s, 335 MB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.77 s, 336 MB/s
aes-256-ccm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.9289 s, 254 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.987 s, 253 MB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.8424 s, 255 MB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.6919 s, 257 MB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 16.7647 s, 256 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.8198 s, 335 MB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.9471 s, 332 MB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.7224 s, 338 MB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.6477 s, 340 MB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 12.98 s, 331 MB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.23965 s, 1.3 GB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.26535 s, 1.3 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.15645 s, 1.4 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.11602 s, 1.4 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.18875 s, 1.3 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.74564 s, 2.5 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.72891 s, 2.5 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.73381 s, 2.5 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.73962 s, 2.5 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.73328 s, 2.5 GB/s
averages:
- raw: read: 7.28 GB/s; write: 3.22 GB/s
- none: read: 2.90 GB/s; write: 1.88 GB/s
- aes-128-gcm: read: 2.30 GB/s; write: 1.64 GB/s
- aes-192-gcm: read: 2.34 GB/s; write: 1.66 GB/s
- aes-256-gcm: read: 2.30 GB/s; write: 1.66 GB/s
- aes-128-ccm: read: 342.00 MB/s; write: 263.20 MB/s
- aes-192-ccm: read: 334.60 MB/s; write: 259.60 MB/s
- aes-256-ccm: read: 335.20 MB/s; write: 255.00 MB/s
- luks: read: 2.50 GB/s; write: 1.34 GB/s
summary:
- aes-256-gcm: read: 2.30 GB/s; write: 1.66 GB/s
- luks: read: 2.50 GB/s; write: 1.34 GB/s
Doesn't look significantly different with the change
testing the 2nd revision of the script, which has the changes for LUKS (which didn't seem to matter), and just doing 1 test — oops, `{1..${ntests}}` is not valid bash (brace expansion happens before variable expansion), so just ignoring that — with shorter delays between tests
- raw: read: 7.40 GB/s; write: 2.90 GB/s
- none: read: 6.80 GB/s; write: 1.90 GB/s
- aes-256-gcm: read: 1.80 GB/s; write: 1.60 GB/s
- luks: read: 6.70 GB/s; write: 1.30 GB/s
here we're seeing the massive read speed for LUKS, which is consistent with the test from
- https://www.reddit.com/r/zfs/comments/wdrfxp/testing_and_comparing_io_performance_with_and/?sort=new
- https://github.com/jkool702/zfsEncryption_SpeedTest
that author suggests there's something related to the ARC (https://www.reddit.com/r/zfs/comments/wdrfxp/comment/ij1sw26/?utm_source=reddit&utm_medium=web2x&context=3, openzfs/zfs#13736)
so is there some bias from running more repeats of tests before reaching the luks tests that is affecting the ARC use? And the ARC was deliberately set to 1GB in this script
So which test result is most likely to reflect real world performance? Is the 7GB/sec LUKS read artificial?
ubuntu 23.04 in a VM this time, for convenience
fixed the for loop for ntests
ntests=1
tsleep=13
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.65696 s, 1.2 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.59917 s, 7.2 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.16197 s, 832 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.47988 s, 2.9 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.32871 s, 806 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.83335 s, 2.3 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 6.70606 s, 640 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.9443 s, 1.5 GB/s
ntests=1
tsleep=2
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.08022 s, 2.1 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.564674 s, 7.6 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.18786 s, 828 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.60576 s, 2.7 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.18597 s, 828 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.81138 s, 2.4 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 6.76436 s, 635 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.47837 s, 1.2 GB/s
no longer showing the fast read
ntests=5
tsleep=2
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.1298 s, 2.0 GB/s
write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.68153 s, 2.6 GB/s
write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.28239 s, 3.3 GB/s
write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.70635 s, 2.5 GB/s
write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.76906 s, 2.4 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.561036 s, 7.7 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.564651 s, 7.6 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.567644 s, 7.6 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.565797 s, 7.6 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.58636 s, 7.3 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 4.87423 s, 881 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.86044 s, 1.1 GB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.10104 s, 1.4 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.13075 s, 1.4 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.05914 s, 1.4 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.50446 s, 2.9 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.4684 s, 2.9 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.49998 s, 2.9 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.4969 s, 2.9 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.53308 s, 2.8 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.13876 s, 836 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 4.65661 s, 922 MB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.62946 s, 1.2 GB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 4.02631 s, 1.1 GB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.75486 s, 1.1 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.92188 s, 2.2 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.82023 s, 2.4 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.82875 s, 2.3 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.92596 s, 2.2 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.87577 s, 2.3 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 7.38316 s, 582 MB/s
Write 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 6.40385 s, 671 MB/s
Write 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.1141 s, 840 MB/s
Write 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.17265 s, 830 MB/s
Write 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.20904 s, 825 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.53846 s, 1.7 GB/s
read 2: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.99465 s, 1.4 GB/s
read 3: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.86417 s, 1.5 GB/s
read 4: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.1706 s, 1.4 GB/s
read 5: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.97589 s, 1.4 GB/s
can't reproduce the 6+ GB/sec LUKS reads from my previous testing
ntests=1
tsleep=2
in VM
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.11431 s, 2.0 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.57665 s, 7.4 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 4.91082 s, 875 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.58907 s, 2.7 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 5.10382 s, 842 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.79858 s, 2.4 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 7.8769 s, 545 MB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.17711 s, 1.4 GB/s
on metal
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 1.49477 s, 2.9 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.579176 s, 7.4 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.28921 s, 1.9 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.634593 s, 6.8 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.61906 s, 1.6 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.42085 s, 1.8 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.21853 s, 1.3 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.63425 s, 6.8 GB/s
added another revision with
- remove sync=always
- only use 1 vdev
results in VM with
ntests=1
tsleep=2
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.13835 s, 2.0 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.568023 s, 7.6 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.33315 s, 1.8 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.648203 s, 6.6 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.57396 s, 1.7 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.47514 s, 1.7 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.13197 s, 1.4 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.695824 s, 6.2 GB/s
we now have the 6GB/s none,luks read
adding back sync=always, same test in VM
raw
write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.09128 s, 2.1 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.58907 s, 7.3 GB/s
none
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.49589 s, 1.7 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.670201 s, 6.4 GB/s
aes-256-gcm
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.87812 s, 1.5 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 2.5671 s, 1.7 GB/s
luks
Write 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 3.38855 s, 1.3 GB/s
read 1: 4294967296 bytes (4.3 GB, 4.0 GiB) copied, 0.679988 s, 6.3 GB/s
So comparing raidz2 across 6 disks vs. a single disk with no RAID: the single-disk setup consistently shows both plain ZFS (no encryption) and LUKS + unencrypted ZFS reaching 6 GB/sec reads
output from original script (see first revision)
quick summary
and of main interest: