apt install and --reinstall
remove
purge or --purge
upgrade
update
clean and autoclean
apt-cache policy <package name>
search
show
apt update
apt clean
apt autoclean
apt install and --reinstall
remove
purge or --purge
upgrade
update
clean and autoclean
apt-cache policy <package name>
search
show
apt update
apt clean
apt autoclean
# 1
# Identify the installed boot loader: read the MBR (first 512 bytes) and
# scan the printable strings for known boot-loader names.
sudo dd if=/dev/sda bs=512 count=1 2>/dev/null | strings | grep -Eoi 'grub|lilo|acronis|reboot'
#2
# Same idea, one loader at a time. Notes on the fix:
#  - /dev/hda is the legacy IDE device name; modern kernels expose disks as /dev/sdX.
#  - pipe through strings: grepping the raw binary sector only prints
#    "Binary file (standard input) matches" instead of the match itself.
#  - redirect dd's status chatter to /dev/null rather than merging it into the pipe.
sudo dd if=/dev/sda bs=512 count=1 2>/dev/null | strings | grep GRUB
sudo dd if=/dev/sda bs=512 count=1 2>/dev/null | strings | grep LILO
https://askubuntu.com/questions/24459/how-do-i-find-out-which-boot-loader-i-have
# Install the NVMe management CLI
sudo apt-get install nvme-cli
# List all NVMe drives (model, serial, firmware, capacity)
sudo nvme list
# Dump the controller identify data (features and capabilities)
sudo nvme id-ctrl /dev/nvme0n1
# Get info about smart
sudo nvme smart-log /dev/nvme0
# Vendor-specific additional SMART attributes (not supported by every drive)
sudo nvme smart-log-add /dev/nvme0
# Additional info
# -H decodes the fields into human-readable descriptions
sudo nvme id-ctrl /dev/nvme0 -H
sudo nvme id-ns /dev/nvme0n1 -H
sudo nvme show-regs /dev/nvme0n1 -H
# GPT-Tables
# Copy the partition table to a file and restore it (here: onto another disk)
sgdisk --backup=sda_table /dev/sda
sgdisk --load-backup=sda_table /dev/sdb
# Randomize the disk and partition GUIDs so the copy does not clash with the original
sgdisk -G /dev/sdb
# Backup and Restore from /dev/sda to /dev/sdb in one command:
# (note the argument order: -R <target-disk> <source-disk>)
sgdisk -R /dev/sdb /dev/sda
# Finally randomize the GUID of all partitions on the disk:
sgdisk -G /dev/sdb
# MBR
# Copy table from /dev/sda to /dev/sdb:
sfdisk -d /dev/sda | sfdisk /dev/sdb
# (Optional) If the kernel does not see the new partitions, re-read the table.
# Note: 'sfdisk -R' was removed in util-linux 2.26 — use blockdev (or partprobe):
blockdev --rereadpt /dev/sdb
# 1
# Monitor dd throughput by inserting pv (pipe viewer) into the pipeline
sudo apt-get install pv
# Example
dd if=/dev/urandom | pv | dd of=/dev/null
# 2
# GNU coreutils dd has a built-in progress display (status=progress)
dd if=/dev/urandom of=/dev/null status=progress
# 3
# Show progress of an already-running dd:
# run the dd process, then open another terminal and signal it with SIGUSR1.
# pkill -x matches the exact process name and signals every matching PID,
# which is more robust than kill -USR1 $(pgrep ^dd) (unquoted pattern,
# breaks when zero or several dd processes exist).
sudo pkill -USR1 -x dd
# This will display dd progress in the dd terminal window without halting the process.
# If you're on BSD or OS X, use INFO instead of USR1. The USR1 signal will terminate dd.
# repeat every 5 seconds
watch -n5 'sudo pkill -USR1 -x dd'
# clean disk — zero-fill the whole device (DESTROYS ALL DATA on /dev/sdz)
# oflag=direct bypasses the page cache for a steadier write rate
dd if=/dev/zero of=/dev/sdz bs=2M oflag=direct
apt-get install fio
# 1
# Random-write latency test: one outstanding request, fsync after every write.
# (The original listed the variant option sets as bare lines after the command,
# which would execute as broken commands — they are shown as comments instead.)
fio -ioengine=libaio -name=test -direct=1 -filename=/dev/sdX \
  -bs=4k -rw=randwrite -iodepth=1 -fsync=1
# Variants — substitute these options in the command above:
#   -bs=4k -rw=randwrite -iodepth=128   # random-write IOPS at high queue depth
#   -bs=4M -rw=write -iodepth=16        # sequential-write throughput
#2
# The original used '--filename=device name' and '--filename=/custom mount point/file':
# the unquoted spaces split the argument and break the command. Replace /dev/sdX
# and /mnt/point/fio_file below with your actual device / test-file path.
# IOPS Performance Tests
# Test random reads
sudo fio --filename=/dev/sdX --direct=1 --rw=randread --bs=4k --ioengine=libaio --iodepth=256 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1 --readonly
# Test file random read/writes
sudo fio --filename=/mnt/point/fio_file --size=500GB --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=256 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1
sudo fio --filename=/home/administrator/temp/fio_file --size=20GB --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=256 --runtime=240 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1
# Test device random read/writes (destructive on the raw device!)
sudo fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=256 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1
# Test device random read/writes at queue depth 16
sudo fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=16 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1
# Test sequential reads
sudo fio --filename=/dev/sdX --direct=1 --rw=read --bs=4k --ioengine=libaio --iodepth=256 --runtime=120 --numjobs=4 --time_based --group_reporting --name=iops-test-job --eta-newline=1 --readonly
# Throughput Performance Tests
# Test random reads
sudo fio --filename=/dev/sdX --direct=1 --rw=randread --bs=64k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=4 --time_based --group_reporting --name=throughput-test-job --eta-newline=1 --readonly
# Test file random read/writes
sudo fio --filename=/mnt/point/fio_file --size=500GB --direct=1 --rw=randrw --bs=64k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=4 --time_based --group_reporting --name=throughput-test-job --eta-newline=1
# Test device random read/writes
sudo fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=64k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=4 --time_based --group_reporting --name=throughput-test-job --eta-newline=1
# Test sequential reads
sudo fio --filename=/dev/sdX --direct=1 --rw=read --bs=64k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=4 --time_based --group_reporting --name=throughput-test-job --eta-newline=1 --readonly
# Latency Performance Tests (iodepth=1, numjobs=1 isolates single-request latency)
# Test random reads for latency
sudo fio --filename=/dev/sdX --direct=1 --rw=randread --bs=4k --ioengine=libaio --iodepth=1 --numjobs=1 --time_based --group_reporting --name=readlatency-test-job --runtime=120 --eta-newline=1 --readonly
# Test random read/writes for latency
sudo fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=4k --ioengine=libaio --iodepth=1 --numjobs=1 --time_based --group_reporting --name=rwlatency-test-job --runtime=120 --eta-newline=1 --readonly
https://askubuntu.com/questions/57908/how-can-i-quickly-copy-a-gpt-partition-scheme-from-one-hard-drive-to-another https://tech.feedyourhead.at/content/copy-partition-table-one-disk-another https://www.cyberciti.biz/faq/linux-backup-restore-a-partition-table-with-sfdisk-command/ https://askubuntu.com/questions/215505/how-do-you-monitor-the-progress-of-dd
https://www.percona.com/blog/2017/02/09/using-nvme-command-line-tools-to-check-nvme-flash-health/ https://unix.stackexchange.com/questions/472211/list-features-of-nvme-drive-like-hdparm-i-for-non-nvme https://wiki.archlinux.org/index.php/Solid_state_drive/NVMe
http://recoverymonkey.org/2012/07/26/an-explanation-of-iops-and-latency/ https://habr.com/ru/post/154235/
# Get info — print the BMC network configuration (default channel / channel 1)
ipmitool lan print
ipmitool lan print 1
# Configure the BMC network settings on LAN channel 1
ipmitool lan set 1 ipsrc [ static | dhcp ]
ipmitool lan set 1 ipaddr {YOUR DESIRED IP}
ipmitool lan set 1 netmask {YOUR NETMASK}
ipmitool lan set 1 defgw ipaddr 10.0.1.1
# To Get Lan Mode for Supermicro
# (vendor-specific raw command — Supermicro boards only)
ipmitool raw 0x30 0x70 0x0C 0x00
# return code:
# 00 dedicated
# 01 shared
# 02 Failover
# Set Lan Mode
# for dedicated
ipmitool raw 0x30 0x70 0x0C 0x01 0
# for shared
ipmitool raw 0x30 0x70 0x0C 0x01 1
# for failover
ipmitool raw 0x30 0x70 0x0C 0x01 2
# Reset IPMI (mc/bmc are aliases — use whichever your ipmitool version accepts)
ipmitool mc reset cold
ipmitool bmc reset cold
# Factory reset
# (Supermicro-specific raw command — wipes BMC users/settings; use with care)
ipmitool raw 0x30 0x40
# Modify boot device for the next reboot
ipmitool chassis bootdev pxe
ipmitool chassis bootdev cdrom
ipmitool chassis bootdev bios
https://serverfault.com/questions/361940/configuring-supermicro-ipmi-to-use-one-of-the-lan-interfaces-instead-of-the-ipmi https://www.supermicro.com/support/faqs/faq.cfm?faq=9848 https://serverfault.com/questions/361940/configuring-supermicro-ipmi-to-use-one-of-the-lan-interfaces-instead-of-the-ipmi https://www.atlex.ru/baza-znanij/rukovodstva/instruktsiya-po-cold-reset-i-smene-parolya-ipmi-iz-os/ https://portal.nutanix.com/page/documents/kbs/details?targetId=kA00e000000CrKRCA0 https://community.pivotal.io/s/article/How-to-work-on-IPMI-and-IPMITOOL?language=en_US https://www.thomas-krenn.com/en/wiki/Configuring_IPMI_under_Linux_using_ipmitool
# firewalld on CentOS Linux 7/RHEL v7 and newer
# disable on boot, stop now, then confirm it is inactive
$ sudo systemctl disable firewalld
$ sudo systemctl stop firewalld
$ sudo systemctl status firewalld
# Ubuntu Linux
$ sudo ufw disable
# firewall on older version of CentOS/RHEL version 6.x and earlier
# save the current rules, stop the service, and keep it off after reboot
service iptables save
service iptables stop
chkconfig iptables off
# Some examples — flush all iptables rules and open the firewall.
# Set the default policies to ACCEPT *before* flushing: if a chain policy is
# DROP, flushing the rules first can instantly cut off remote (SSH) access.
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
# -F flushes all rules, -X deletes user-defined chains, per table
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
# Short test — start a SMART short self-test (runs inside the drive)
smartctl -t short /dev/sdb
# wait a couple of minutes, then read the self-test log for the result
smartctl -l selftest /dev/sdb
https://www.thomas-krenn.com/en/wiki/Analyzing_a_Faulty_Hard_Disk_using_Smartctl https://serverfault.com/questions/313649/how-to-interpret-this-smartctl-smartmon-data https://www.opennet.ru/base/sys/smart_hdd_mon.txt.html
# Install full vim on CentOS/RHEL (the minimal 'vi' has no syntax colours)
yum -y install vim-enhanced
# Edit file /etc/profile.d/vim.sh
vi /etc/profile.d/vim.sh
# Comment out the line '[ -n "$ID" -a "$ID" -le 200 ] && return'
# (the guard returns early when "$ID" is <= 200 — presumably system accounts,
# which would skip the vim alias; verify against your vim.sh).
# Save; then log out and log back in for the change to take effect.
https://www.thegeekdiary.com/how-to-enable-text-colour-in-vi-similar-to-vim-in-centosrhel/
# Show pool health, per-vdev state and any data errors
zpool status -v
zpool status -v rpool
# with device name
# (-L resolves symlinks to the real /dev device names)
zpool status -vL rpool
# Turn a single-disk vdev into a mirror by attaching a second drive:
zpool attach [poolname] [original drive to be mirrored] [new drive]
# An example
zpool attach rpool /dev/sda /dev/sdb
# Take a mirror member offline, then remove it from the pool
zpool offline rpool /dev/sdb
zpool remove rpool /dev/sdb
# or
# detach drops the device straight out of the mirror
zpool detach rpool /dev/sdb
# Scrub — verify all checksums, repairing from redundancy where possible
# start
zpool scrub rpool
# stop
zpool scrub -s rpool
# Grow the pool after swapping in bigger disks:
# enable autoexpand, bring each partition online with -e, then switch it back off
zpool set autoexpand=on rpool
watch zpool status -v
zpool status -v
# -e expands the vdev to use the full partition size
zpool online -e rpool /dev/sdb2
zpool online -e rpool /dev/sda2
zpool set autoexpand=off rpool
zpool status
zpool scrub rpool
#### Replacing a mirror member — example: sda
# Offline sda
zpool status -v
zpool offline rpool /dev/sda2
# Inspect the failing drive's SMART data and identity before pulling it
smartctl -a /dev/sda
hdparm -I /dev/sda
# Add new sda
# Copy the partition table from the healthy disk (sdb) onto the new sda,
# then give the new disk unique GUIDs
sgdisk -R /dev/sda /dev/sdb
sgdisk -G /dev/sda
fdisk -l
# Clone partition 1 (boot) from the healthy disk to the new one
dd if=/dev/sdb1 of=/dev/sda1
# Old and new path are identical: in-place replace after the physical disk swap
zpool replace rpool /dev/sda2 /dev/sda2
zpool status -v
# offline sdb — same procedure, mirrored from sda
zpool status -v
zpool offline rpool /dev/sdb2
# Add new sdb
sgdisk -R /dev/sdb /dev/sda
sgdisk -G /dev/sdb
fdisk -l
dd if=/dev/sda1 of=/dev/sdb1
zpool replace rpool /dev/sdb2 /dev/sdb2
zpool status -v
https://edmondscommerce.github.io/replacing-failed-drive-in-zfs-zpool-on-proxmox/