# ZFS on Ubuntu 18.04

Bug fix: force the pool import on boot via root's crontab, in case of a ZFS cache issue.

```
pool=zfspv-pool; file=/var/spool/cron/crontabs/root
# Add an @reboot import entry for the pool if it is not already there
sudo cat "$file" | grep "$pool" || echo "@reboot /usr/sbin/zpool import $pool" | sudo tee -a "$file"
sudo chmod 0600 "$file" && sudo chown root:crontab "$file"
```
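To confirm the entry landed (pool name as above), listing root's crontab should show the `@reboot` line:

```
# Show root's crontab and filter for the zpool import entry
sudo crontab -l -u root | grep zpool
```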

https://openzfs.github.io/openzfs-docs/man/8/zpool-create.8.html

Install

```
sudo apt-get update && sudo apt-get install zfsutils-linux
```

```
lsblk --ascii -o NAME,PARTUUID,LABEL,PATH,FSTYPE,UUID
# Partition the disk if needed: fdisk /dev/<device>, then n, p, 1, and i to print the PARTUUID
sudo zpool create -f tank /dev/disk/by-partuuid/9c179fff-7b03-794e-8650-f1d507a885b6
# e.g. zpool create pool02 /dev/disk/by-partuuid/c8e0c300-5ec9-714c-aef9-fa0dc3f0cab6
sudo zfs create tank/local-path

sudo zpool create -f tank /dev/sdc
zpool status
sudo zfs set mountpoint=/zfs/local-path tank/local-path
sudo zfs snapshot tank/local-path@snap1
zfs list -t snapshot
sudo zfs rollback tank/local-path@snap1
sudo zpool import
sudo zpool import tank -f
ls /zfs/local-path/
```
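A quick sanity check after creating the pool and dataset (names as above) is to confirm the pool is healthy and the dataset is mounted where expected:

```
# Pool health and capacity
zpool list tank
# Dataset mountpoints and usage
zfs list -o name,mountpoint,used,avail -r tank
```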

https://docs.oracle.com/cd/E19253-01/819-5461/gbcya/index.html

# Zpool not importing on reboot

- https://askubuntu.com/questions/1289462/zfs-import-cache-service-failed-after-upgrade-from-20-04-to-20-10

```
sudo sed -i "s/^ZFS_INITRD_POST_MODPROBE_SLEEP.*/ZFS_INITRD_POST_MODPROBE_SLEEP='30'/g" /etc/default/zfs
sudo mv /etc/zfs/zpool.cache /etc/zfs/zpool.cache.bad
sudo zpool import zfspv-pool
sudo update-initramfs -u && sudo update-grub

# sudo systemctl status zfs-import-cache.service
# sudo update-initramfs -k all -u
```
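Another option, if the cache-based import keeps failing, is to let the import service scan devices instead of relying on the cache file (a sketch; verify the unit names on your install):

```
# Import pools by scanning devices rather than reading zpool.cache
sudo systemctl enable zfs-import-scan.service
sudo systemctl disable zfs-import-cache.service
```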

Corrupt cache - device already in use

```
sudo systemctl status zfs-import-cache.service
sudo mv /etc/zfs/zpool.cache /etc/zfs/zpool.cache.bad

sudo update-initramfs -u && sudo update-grub
```
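Once the pool imports cleanly again, the cache file can be regenerated so future boots use a fresh copy (pool name assumed from above):

```
# Rewrite /etc/zfs/zpool.cache for this pool
sudo zpool set cachefile=/etc/zfs/zpool.cache zfspv-pool
```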

# Setup zpool with cache/log vdevs

```
# Example: two mirrored data vdevs on NVMe
sudo zpool create -f tank mirror nvme3n1 nvme4n1 mirror nvme5n1 nvme6n1

# More examples: create a pool of four mirrors, then add another mirror vdev,
# a mirrored log (SLOG), and cache (L2ARC) devices
zpool create -f tank mirror sdd sde mirror sdf sdg mirror sdh sdi mirror sdj sdk
sudo zpool add tank mirror nvme2n1 nvme3n1
sudo zpool add tank log mirror nvme0n1 nvme1n1
sudo zpool add tank cache nvme2n1 nvme3n1
```
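To confirm the resulting layout, including the log and cache vdevs (pool name as above):

```
# Show vdev layout and per-device activity
zpool status tank
zpool iostat -v tank
```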

# System prep: default Docker and LXD on ZFS datasets

```
sudo systemctl stop lxd lxd.socket
sudo rm -Rf /var/lib/lxd

sudo zfs create tank/lxd
sudo zfs set mountpoint=/var/lib/lxd tank/lxd
sudo zfs create tank/libvirt
sudo zfs set mountpoint=/var/lib/libvirt tank/libvirt
sudo zfs create tank/docker
sudo zfs set mountpoint=/var/lib/docker tank/docker
sudo zfs mount -a
sudo apt-get install docker-ce  # after you set up the Docker repository from the Docker site
```
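A quick way to double-check that LXD, libvirt, and Docker will land on ZFS (dataset names as above):

```
# Confirm the datasets are mounted at the expected service directories
zfs list -o name,mountpoint -r tank
df -h /var/lib/lxd /var/lib/libvirt /var/lib/docker
```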

# Install qemu-kvm and libvirt

```
sudo apt-get install libguestfs-tools qemu-kvm libvirt-clients libvirt-daemon-system bridge-utils virt-manager lintian curl wget git
```

# Other

```
sudo zpool import tank
sudo zfs mount -a
sudo apt-get -y install docker-ce
```

# ZFS Troubleshooting

```
nvme smart-log /dev/nvme0n1
nvme smart-log /dev/nvme3n1
```

Both drives reported no errors.

https://docs.huihoo.com/opensolaris/solaris-zfs-administration-guide/html/ch09s06.html

Running `zpool clear ...` didn't do anything.

```
zpool status

errors: No known data errors

  pool: tank
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
    invalid.  Sufficient replicas exist for the pool to continue
    functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://zfsonlinux.org/msg/ZFS-8000-4J
  scan: none requested
config:

    NAME                     STATE     READ WRITE CKSUM
    tank                     DEGRADED     0     0     0
      mirror-0               ONLINE       0     0     0
        sdc                  ONLINE       0     0     0
        sdd                  ONLINE       0     0     0
      mirror-1               ONLINE       0     0     0
        sde                  ONLINE       0     0     0
        sdf                  ONLINE       0     0     0
      mirror-2               ONLINE       0     0     0
        sdg                  ONLINE       0     0     0
        sdh                  ONLINE       0     0     0
      mirror-3               ONLINE       0     0     0
        sdi                  ONLINE       0     0     0
        sdj                  ONLINE       0     0     0
    logs
      mirror-4               DEGRADED     0     0     0
        4221213393078321817  FAULTED      0     0     0  was /dev/nvme3n1
        nvme1n1              ONLINE       0     0     0
    cache
      nvme2n1                ONLINE       0     0     0
      nvme0n1                FAULTED      0     0     0  corrupted data
```


Use with caution. This example pool is built from mirrors.

You could remove a disk from the cache vdev and use it for the mirror:

```
zpool remove tank nvme0n1  # remove it from the cache vdev
```

Replace the failed drive using the single-device form of the command (not two device names):

```
zpool replace tank nvme3n1
```

This automatically used the freed nvme0n1 from the pool; if no spare device is available, it may just replace the device with itself.
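For reference, the explicit two-device form names the old device (here the GUID from the status output above) and then the new one; the replacement device name below is only illustrative:

```
# Replace the faulted log device with a specific new disk (new device name is a placeholder)
sudo zpool replace tank 4221213393078321817 /dev/nvme7n1
```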

# Failed Drives
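As a general sketch for swapping out a failed data disk (device names here are placeholders, not taken from the pool above):

```
# Take the failed disk out of service, swap the hardware, then resilver onto the new disk
sudo zpool offline tank sdX
sudo zpool replace tank sdX /dev/sdY   # sdY = the replacement disk
sudo zpool status tank                 # watch the resilver progress
```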

# ZFS Intent Log & ARC/L2ARC
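A couple of read-only checks for how the ARC/L2ARC and the log (SLOG) devices are behaving (pool name assumed from above):

```
# Raw ARC statistics exposed by the kernel module
head -n 40 /proc/spl/kstat/zfs/arcstats

# Per-vdev activity, including the log and cache devices, every 5 seconds
zpool iostat -v tank 5
```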

# Linux disk hot-swapping

```
# Take a disk offline
echo offline > /sys/block/sda/device/state

# Put a disk online
echo running > /sys/block/sda/device/state

# Delete a disk before unplugging it
echo 1 > /sys/block/sda/device/delete

# Scan for newly hot-plugged disks (replace hostN with the correct SCSI host)
echo "0 0 0" > /sys/class/scsi_host/hostN/scan
```

Dell OpenManage (omreport/omconfig) commands:

```
# Display the controller IDs
omreport storage controller

# Display the IDs of the physical disks attached to the controller
omreport storage pdisk controller=0

# Display property information for all virtual disks on all controllers
omreport storage vdisk

# Blink/unblink disk LEDs to make sure you are dealing with the proper disks
omconfig storage pdisk action=blink controller=0 pdisk=0:0:2
omconfig storage pdisk action=unblink controller=0 pdisk=0:0:2
omconfig storage pdisk action=blink controller=0 pdisk=0:0:3
omconfig storage pdisk action=unblink controller=0 pdisk=0:0:3

# Clear any foreign configuration
omconfig storage controller action=clearforeignconfig controller=0

# Check for the new virtual disk
omreport storage vdisk

# Initialize a virtual disk
omconfig storage vdisk action=fastinit controller=0 vdisk=id
```

# Replace a disk on a Dell server with several disks in a JBOD configuration

```
umount /dev/sdg1
echo offline > /sys/block/sdg/device/state
echo 1 > /sys/block/sdg/device/delete
```

Install the replacement disk, then:

```
echo "0 0 0" > /sys/class/scsi_host/hostN/scan
#
# WARNING!! WARNING!! Danger Will Robinson!!
# Verify that the disk is the same device
#
fdisk -l /dev/sdg
#
echo running > /sys/block/sdg/device/state
parted /dev/sdg
mkfs.ext3 -m0 -L /disk6 /dev/sdg1
tune2fs -i0 -c0 /dev/sdg1
mount /dev/sdg1
```

Or partition with fdisk, label the filesystem, and add it to fstab:

```
fdisk /dev/sdg
>>>      Command (m for help): n    (create a new partition)
>>>      Partition type: p          (primary)
>>>      Partition number (1-4): 1
>>>      Command (m for help): w    (write and save the partition table)
mkfs.ext3 -L disk2 -m 0 /dev/sdg1
tune2fs -c 0 -i 0 /dev/sdg1
cat >>/etc/fstab <<EOM
LABEL=disk2           /disk2                   ext3    defaults        1 2
EOM
mkdir /disk2
mount /disk2
```

# Backup using send and receive snapshots
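A minimal sketch of snapshot backups with `zfs send` / `zfs receive`, assuming the `tank/local-path` dataset from above, a hypothetical destination pool named `backup`, and a hypothetical remote host `backuphost`:

```
# Take a snapshot and copy it to another pool on the same host
sudo zfs snapshot tank/local-path@backup1
sudo zfs send tank/local-path@backup1 | sudo zfs receive backup/local-path

# Incremental follow-up: send only the changes since the previous snapshot
sudo zfs snapshot tank/local-path@backup2
sudo zfs send -i tank/local-path@backup1 tank/local-path@backup2 | sudo zfs receive backup/local-path

# Or stream the snapshot to a remote machine over SSH
sudo zfs send tank/local-path@backup1 | ssh backuphost sudo zfs receive backup/local-path
```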