diff --git a/active/software_btrfs/btrfs.md b/active/software_btrfs/btrfs.md index c4b6ad4..200b564 100644 --- a/active/software_btrfs/btrfs.md +++ b/active/software_btrfs/btrfs.md @@ -1,21 +1,29 @@ # BTRFS - [BTRFS](#btrfs) + - [Naming Conventions](#naming-conventions) - [Creating an Array](#creating-an-array) + - [Converting an Array Between RAID Versions](#converting-an-array-between-raid-versions) - [Mounting the Array](#mounting-the-array) - [Adding Disks](#adding-disks) - [Replacing a Disk](#replacing-a-disk) - [Scrubbing the Array](#scrubbing-the-array) - [Creating Subvolumes](#creating-subvolumes) - [Monitoring Usage](#monitoring-usage) - - [Encrypting BTRFS with LUKS](#encrypting-btrfs-with-luks) - [Monitoring Disk Health](#monitoring-disk-health) - [Defragmenting and Compressing](#defragmenting-and-compressing) + - [Converting ext4 to btrfs](#converting-ext4-to-btrfs) Oracle [has decent docs here](https://docs.oracle.com/en/operating-systems/oracle-linux/8/btrfs/btrfs-ResizingaBtrfsFileSystem.html) You'll also want to [read about btrfs compression](https://thelinuxcode.com/enable-btrfs-filesystem-compression/) +## Naming Conventions + +`poolX` is my naming convention for data pools. `pool0` is the first pool you create. + +`backupX` is my naming convention for backup pools. `backup0` is the first backup pool you create. 
+ ## Creating an Array ```bash @@ -39,11 +47,21 @@ btrfs device scan # Raid10 mkfs.btrfs --data raid10 --metadata raid10 /dev/vdb /dev/vdc /dev/vdd /dev/vde btrfs device scan +``` +Label your arrays for easier identification in btrfs filesystem information commands + +```bash +btrfs filesystem label /btrfs/pool0 pool0 +``` + +## Converting an Array Between RAID Versions + +```bash # Convert to raid1 # -dconvert == "data convert" # -mconvert == "metadata convert" -btrfs balance start -dconvert=raid1 -mconvert=raid1 /btrfs +btrfs balance start -dconvert=raid1 -mconvert=raid1 /btrfs/pool0 btrfs balance status ``` @@ -53,48 +71,59 @@ One off ```bash # Create a mount point -mkdir /btrfs +mkdir /btrfs/pool0 + +# List the filesystem UUID +lsblk --fs # Mount the top level subvolume -mount /dev/vdb /btrfs -o subvolid=5 +mount UUID=xxxxx-xxxxx-xxxxx /btrfs/pool0 -o subvolid=5 # Mount with better SSD support -mount /dev/vdb /btrfs -o subvolid=5,ssd +mount UUID=xxxxx-xxxxx-xxxxx /btrfs/pool0 -o subvolid=5,ssd # Mount with auto defragmentation for HDD support -mount /dev/vdb /btrfs -o subvolid=5,autodefrag +mount UUID=xxxxx-xxxxx-xxxxx /btrfs/pool0 -o subvolid=5,autodefrag # Mount a subvolume -mount /dev/vdb /btrfs -o subvol=home +mount UUID=xxxxx-xxxxx-xxxxx /btrfs/pool0 -o subvol=home # Inspect -btrfs filesystem show /btrfs +btrfs filesystem show /btrfs/pool0 ``` In fstab ```conf -UUID=btrfs_uuid /btrfs btrfs defaults 0 0 +UUID=btrfs_uuid /btrfs/pool0 btrfs defaults 0 0 ``` ## Adding Disks ```bash # Add a disk -btrfs device add /dev/vdd /btrfs +btrfs device add /dev/vdd /btrfs/pool0 + +# Balance the array +btrfs balance start /btrfs/pool0 # Watch the expansion -btrfs filesystem usage /btrfs +btrfs filesystem usage /btrfs/pool0 ``` ## Replacing a Disk ```bash # Remove a disk from the array -btrfs device delete /dev/vdb /btrfs +# This may take a while, as btrfs will rebalance the array during this process +btrfs device remove /dev/vdb /btrfs/pool0 + +# You can watch the device 
"used" data drain with +watch btrfs filesystem show /btrfs/pool0/ # Add the new device -btrfs device add /dev/vdg /btrfs +# Again, this may take a while while btrfs rebalances. +btrfs device add /dev/vdg /btrfs/pool0 ``` ## Scrubbing the Array @@ -103,10 +132,10 @@ btrfs device add /dev/vdg /btrfs # Start a scrub to check for errors # -B prevents the process from going to the background # -d prints stats for each device -btrfs scrub start -Bd /btrfs +btrfs scrub start -Bd /btrfs/pool0 # Check the status of a scrub -btrfs scrub status /btrfs +btrfs scrub status /btrfs/pool0 # Watch for disk failures dmesg | grep btrfs @@ -116,13 +145,13 @@ dmesg | grep btrfs ```bash # Create a new subvolume (make sure to mount /btrfs as subvolid=5) -btrfs subvolume create /btrfs/foo +btrfs subvolume create /btrfs/pool0 # List all subvolumes under a path -btrfs subvolume list -t /btrfs +btrfs subvolume list -o /btrfs/pool0 # Delete a subvolume -btrfs subvolume delete /btrfs/foo +btrfs subvolume delete /btrfs/pool0 ``` ## Monitoring Usage @@ -132,55 +161,10 @@ btrfs subvolume delete /btrfs/foo btrfs filesystem show # Show usage for a specific array -btrfs filesystem usage /btrfs +btrfs filesystem usage /btrfs/pool0 # Quick command to filter for data used -btrfs filesystem usage /btrfs | grep 'Data.*Used' -``` - -## Encrypting BTRFS with LUKS - -```bash -export KEYFILE_PATH=/root/btrfs.keyfile -export LUKS_DEVS="sdb sdc sdd sde sdf sdg sdh" - -# Create a key file -dd if=/dev/urandom of=${KEYFILE_PATH} bs=128 count=1 -chmod 400 ${KEYFILE_PATH} - -# Create partitions -for luks_dev in $LUKS_DEVS; do -echo Creating partition for /dev/$luks_dev -parted -s -a optimal -- /dev/$luks_dev mklabel gpt mkpart primary 1MiB 100% -done - -# Check that your list is good -for luks_dev in $LUKS_DEVS; do -echo will encrypt /dev/${luks_dev}1 and create /dev/mapper/luks-$(lsblk -n -o PARTUUID /dev/${luks_dev}1) -done - -# Create the luks partitions -# Note that --iter-time 10000 is how long, in 
milliseconds, to decrypt the key -# -v is verbose -# -q is "batch mode", don't ask for confirmation -# Longer makes it harder to brute-force -for luks_dev in $LUKS_DEVS; do \ - LUKS_UUID=$(lsblk -n -o PARTUUID /dev/${luks_dev}1) - LUKS_NAME=luks-${LUKS_UUID} - echo "Encrypting /dev/${luks_dev}1"; \ - cryptsetup luksFormat -v -q --key-file ${KEYFILE_PATH} /dev/${luks_dev}1 - echo "Unlocking /dev/${luks_dev}1 as ${LUKS_NAME}" - cryptsetup open /dev/${luks_dev}1 ${LUKS_NAME} --key-file=${KEYFILE_PATH} - echo "Adding ${LUKS_NAME} UUID=${LUKS_UUID} ${KEYFILE_PATH} discard to crypttab" - echo "${LUKS_NAME} UUID=${LUKS_UUID} none discard" >> /etc/crypttab -done - -# List filesystems with UUID -lsblk --fs - -# Now create the array using the /dev/mapper entries from above -mkfs.btrfs --data raid1 --metadata raid1 /dev/mapper/crypt-btrfs-vdb /dev/mapper/crypt-btrfs-vdc... -btrfs device scan +btrfs filesystem usage /btrfs/pool0 | grep 'Data.*Used' ``` ## Monitoring Disk Health @@ -190,28 +174,39 @@ btrfs device scan ```bash # btrfs device stats shows any errors # Grep for any line not ending in "0" -btrfs device stats /mnt | grep -vE ' 0$' +btrfs device stats /btrfs/pool0 | grep -vE ' 0$' # Show the device IDs for the mounted filesystem -btrfs filesystem show /mnt +btrfs filesystem show /btrfs/pool0 # Delete a device (with ID 8, for example) -btrfs device delete 8 /mnt +btrfs device delete 8 /btrfs/pool0 # Add a device to the array -btrfs device add /dev/vdi1 /mnt +btrfs device add /dev/vdi1 /btrfs/pool0 # Rebalance the array -btrfs balance start /mnt +btrfs balance start --background --full-balance /btrfs/pool0 + +# Check the status +btrfs balance status /btrfs/pool0 ``` ## Defragmenting and Compressing ```bash # Defrag a filesystem -btrfs filesystem defragment /mnt +btrfs filesystem defragment /btrfs/pool0 # Defrag and apply compression # zstd:20 is currently the best compression algorithm -btrfs filesystem defragment -c zstd:20 /mnt +btrfs filesystem defragment -c zstd:20 
/btrfs/pool0 +``` + +## Converting ext4 to btrfs + +```bash +# Unmount and then run btrfs-convert +umount /path/to/mount +btrfs-convert /dev/sdX1 ``` \ No newline at end of file