Compare commits

...

3 Commits

Author SHA1 Message Date
b65ef9cbb7 initial smb instructions
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m10s
2025-12-17 12:32:26 -05:00
ea3e8f9c10 add luks and virsh notes from truenas migration 2025-12-17 10:04:59 -05:00
b5aecf1565 add btrfs notes from server setup 2025-12-16 21:40:37 -05:00
4 changed files with 498 additions and 2 deletions

View File

@@ -0,0 +1,217 @@
# BTRFS
- [BTRFS](#btrfs)
- [Creating an Array](#creating-an-array)
- [Mounting the Array](#mounting-the-array)
- [Adding Disks](#adding-disks)
- [Replacing a Disk](#replacing-a-disk)
- [Scrubbing the Array](#scrubbing-the-array)
- [Creating Subvolumes](#creating-subvolumes)
- [Monitoring Usage](#monitoring-usage)
- [Encrypting BTRFS with LUKS](#encrypting-btrfs-with-luks)
- [Monitoring Disk Health](#monitoring-disk-health)
- [Defragmenting and Compressing](#defragmenting-and-compressing)
Oracle [has decent docs here](https://docs.oracle.com/en/operating-systems/oracle-linux/8/btrfs/btrfs-ResizingaBtrfsFileSystem.html)
You'll also want to [read about btrfs compression](https://thelinuxcode.com/enable-btrfs-filesystem-compression/)
## Creating an Array
```bash
# At any point you can check the status of an array by referencing any member
btrfs filesystem show /dev/vdb
```
```bash
# Raid0
mkfs.btrfs --data raid0 --metadata raid0 /dev/vdb /dev/vdc
btrfs device scan
# Raid1
mkfs.btrfs --data raid1 --metadata raid1 /dev/vdb /dev/vdc
btrfs device scan
# Raid1c3
mkfs.btrfs --data raid1c3 --metadata raid1c3 /dev/vdb /dev/vdc /dev/vdd
btrfs device scan
# Raid10
mkfs.btrfs --data raid10 --metadata raid10 /dev/vdb /dev/vdc /dev/vdd /dev/vde
btrfs device scan
# Convert to raid1
# -dconvert == "data convert"
# -mconvert == "metadata convert"
btrfs balance start -dconvert=raid1 -mconvert=raid1 /btrfs
btrfs balance status
```
## Mounting the Array
One off
```bash
# Create a mount point
mkdir /btrfs
# Mount the top level subvolume
mount /dev/vdb /btrfs -o subvolid=5
# Mount with better SSD support
mount /dev/vdb /btrfs -o subvolid=5,ssd
# Mount with auto defragmentation for HDD support
mount /dev/vdb /btrfs -o subvolid=5,autodefrag
# Mount a subvolume
mount /dev/vdb /btrfs -o subvol=home
# Inspect
btrfs filesystem show /btrfs
```
In fstab
```conf
UUID=btrfs_uuid /btrfs btrfs defaults 0 0
```
## Adding Disks
```bash
# Add a disk
btrfs device add /dev/vdd /btrfs
# Watch the expansion
btrfs filesystem usage /btrfs
```
## Replacing a Disk
```bash
# Remove a disk from the array
btrfs device delete /dev/vdb /btrfs
# Add the new device
btrfs device add /dev/vdg /btrfs
```
## Scrubbing the Array
```bash
# Start a scrub to check for errors
# -B prevents the process from going to the background
# -d prints stats for each device
btrfs scrub start -Bd /btrfs
# Check the status of a scrub
btrfs scrub status /btrfs
# Watch for disk failures
dmesg | grep btrfs
```
## Creating Subvolumes
```bash
# Create a new subvolume (make sure to mount /btrfs as subvolid=5)
btrfs subvolume create /btrfs/foo
# List all subvolumes under a path
btrfs subvolume list -t /btrfs
# Delete a subvolume
btrfs subvolume delete /btrfs/foo
```
## Monitoring Usage
```bash
# Quick info for all btrfs arrays
btrfs filesystem show
# Show usage for a specific array
btrfs filesystem usage /btrfs
# Quick command to filter for data used
btrfs filesystem usage /btrfs | grep 'Data.*Used'
```
## Encrypting BTRFS with LUKS
```bash
export KEYFILE_PATH=/root/btrfs.keyfile
export LUKS_DEVS="sdb sdc sdd sde sdf sdg sdh"
# Create a key file
dd if=/dev/urandom of=${KEYFILE_PATH} bs=128 count=1
chmod 400 ${KEYFILE_PATH}
# Create partitions
for luks_dev in $LUKS_DEVS; do
echo Creating partition for /dev/$luks_dev
parted -s -a optimal -- /dev/$luks_dev mklabel gpt mkpart primary 1MiB 100%
done
# Check that your list is good
for luks_dev in $LUKS_DEVS; do
echo will encrypt /dev/${luks_dev}1 and create /dev/mapper/luks-$(lsblk -n -o PARTUUID /dev/${luks_dev}1)
done
# Create the luks partitions
# -v is verbose
# -q is "batch mode", don't ask for confirmation
# Optionally add --iter-time 10000: how long, in milliseconds, the key
# derivation (PBKDF) takes; a longer time makes brute-forcing harder
for luks_dev in $LUKS_DEVS; do \
LUKS_UUID=$(lsblk -n -o PARTUUID /dev/${luks_dev}1)
LUKS_NAME=luks-${LUKS_UUID}
echo "Encrypting /dev/${luks_dev}1"; \
cryptsetup luksFormat -v -q --key-file ${KEYFILE_PATH} /dev/${luks_dev}1
echo "Unlocking /dev/${luks_dev}1 as ${LUKS_NAME}"
cryptsetup open /dev/${luks_dev}1 ${LUKS_NAME} --key-file=${KEYFILE_PATH}
echo "Adding ${LUKS_NAME} UUID=${LUKS_UUID} ${KEYFILE_PATH} discard to crypttab"
echo "${LUKS_NAME} UUID=${LUKS_UUID} ${KEYFILE_PATH} discard" >> /etc/crypttab
done
# List filesystems with UUID
lsblk --fs
# Now create the array using the /dev/mapper/luks-<PARTUUID> entries unlocked above
mkfs.btrfs --data raid1 --metadata raid1 /dev/mapper/luks-<partuuid1> /dev/mapper/luks-<partuuid2>...
btrfs device scan
```
## Monitoring Disk Health
<https://www.dotlinux.net/blog/how-to-configure-smartd-and-be-notified-of-hard-disk-problems-via-email/#installing-smartd-smartmontools>
```bash
# btrfs device stats shows any errors
# Grep for any line not ending in "0"
btrfs device stats /mnt | grep -vE ' 0$'
# Show the device IDs for the mounted filesystem
btrfs filesystem show /mnt
# Delete a device (with ID 8, for example)
btrfs device delete 8 /mnt
# Add a device to the array
btrfs device add /dev/vdi1 /mnt
# Rebalance the array
btrfs balance start /mnt
```
## Defragmenting and Compressing
```bash
# Defrag a filesystem
btrfs filesystem defragment /mnt
# Defrag and apply compression
# zstd currently offers the best ratio/speed tradeoff (valid levels are 1-15)
btrfs filesystem defragment -c zstd /mnt
```

View File

@@ -0,0 +1,91 @@
# LUKS
Disk Encryption
## Encrypting a Drive
You get 8 key slots total.
```bash
# Remember to install if you need it
dnf install cryptsetup
# Create an encryption key
mkdir /etc/luks-keys
chmod 700 /etc/luks-keys
dd if=/dev/urandom bs=128 count=1 of=/etc/luks-keys/data0.key
# Create an encrypted partition
# -q means don't ask for confirmation
# -v means verbose
cryptsetup -q -v luksFormat /dev/nvme6n1p1 /etc/luks-keys/data0.key
# Unlock
cryptsetup -q -v luksOpen --key-file /etc/luks-keys/data0.key /dev/nvme6n1p1 luks-$(cryptsetup luksUUID /dev/nvme6n1p1)
# List keys
cryptsetup luksDump /dev/nvme6n1p1
# Remove a key from a slot
cryptsetup luksKillSlot /dev/nvme6n1p1 2
# Add a new key to a slot
cryptsetup luksAddKey /dev/nvme6n1p1 -S 5
```
## TPM2 Decryption
Mostly taken from here:
<https://gist.github.com/jdoss/777e8b52c8d88eb87467935769c98a95>
PCR reference for `--tpm2-pcrs` args
```text
0: System firmware executable
2: Kernel
4: Bootloader
7: Secure boot state
8: Cmdline
9: Initrd
```
Basic commands:
```bash
# Show tpm2 devices
systemd-cryptenroll --tpm2-device=list
# Show crypto luks block devices
blkid -t TYPE=crypto_LUKS
# Enroll the tpm2 device with systemd-cryptenroll
systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=0,2,4,7,8,9 /dev/nvme0n1p3
# Reenroll
systemd-cryptenroll /dev/nvme0n1p3 --wipe-slot=tpm2 --tpm2-device=auto --tpm2-pcrs=0,2,4,7,8,9
```
Note, you'll need to add `rd.luks.options=tpm2-device=auto` to your kernel parameters.
## Batch Operations
```bash
# Create encrypted drives in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_drive in $LUKS_DEVS; do
  cryptsetup -q -v luksFormat ${luks_drive} /etc/luks-keys/data0.key
done
# Unlock encrypted drives in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_drive in $LUKS_DEVS; do
  cryptsetup -q -v luksOpen --key-file /etc/luks-keys/data0.key ${luks_drive} luks-$(cryptsetup luksUUID ${luks_drive})
done
# Add new keys in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_dev in $LUKS_DEVS; do
echo Adding key to $luks_dev
cryptsetup luksAddKey $luks_dev -S 2
done
```

View File

@@ -0,0 +1,97 @@
# SMB
- [SMB](#smb)
- [Install SMB](#install-smb)
- [Create SMB User](#create-smb-user)
- [Create a SMB Share](#create-a-smb-share)
- [Create a SMB Share with Many Users](#create-a-smb-share-with-many-users)
## Install SMB
```bash
sudo dnf install samba
sudo systemctl enable smb --now
firewall-cmd --get-active-zones
sudo firewall-cmd --permanent --zone=FedoraServer --add-service=samba
sudo firewall-cmd --reload
```
## Create SMB User
```bash
sudo smbpasswd -a ducoterra
```
## Create a SMB Share
```bash
# Create share
mkdir /btrfs/pool0/smb/ducoterra
# Set proper selinux labels for samba
sudo semanage fcontext --add --type "samba_share_t" "/btrfs/pool0/smb/ducoterra(/.*)?"
# Run restorecon at the root of the btrfs subvolume
sudo restorecon -R /btrfs/pool0
```
Edit /etc/samba/smb.conf
```conf
[ducoterra]
comment = My Share
path = /btrfs/pool0/smb/ducoterra
writeable = yes
browseable = yes
public = no
create mask = 0644
directory mask = 0755
write list = user
```
Then restart SMB
```bash
sudo systemctl restart smb
```
## Create a SMB Share with Many Users
```bash
sudo groupadd myfamily
sudo useradd -G myfamily jack
sudo useradd -G myfamily maria
sudo smbpasswd -a jack
sudo smbpasswd -a maria
sudo mkdir /home/share
sudo chgrp myfamily /home/share
sudo chmod 770 /home/share
sudo semanage fcontext --add --type "samba_share_t" "/home/share(/.*)?"
sudo restorecon -R /home/share
```
```conf
[family]
comment = Family Share
path = /home/share
writeable = yes
browseable = yes
public = yes
valid users = @myfamily
create mask = 0660
directory mask = 0770
force group = +myfamily
```
- valid users: only users of the group myfamily have access rights. The @ denotes a group name.
- force group = +myfamily: files and directories are created with this group, instead of the user group.
- create mask = 0660: files in the share are created with permissions to allow all group users to read and write files created by other users.
- directory mask = 0770: as before, but for directories.
Don't forget to restart smb
```bash
systemctl restart smb
```

View File

@@ -16,10 +16,12 @@ Virtual Machine Management
- [Set a Static IP](#set-a-static-ip)
- [Creating VMs](#creating-vms)
- [Create VM with No Graphics and use an Existing QCOW2 Disk](#create-vm-with-no-graphics-and-use-an-existing-qcow2-disk)
- [Create a Cloud Init Compatible VM](#create-a-cloud-init-compatible-vm)
- [Create a Cloud Init Compatible VM](#create-a-cloud-init-compatible-vm)
- [Create VM with Graphics using an ISO Installation Disk](#create-vm-with-graphics-using-an-iso-installation-disk)
- [Create VM using Host Device as Disk](#create-vm-using-host-device-as-disk)
- [Create a Home Assistant VM](#create-a-home-assistant-vm)
- [Snapshots](#snapshots)
- [Creating and Attaching Disks](#creating-and-attaching-disks)
- [Virt Builder](#virt-builder)
## Before you Begin
@@ -55,6 +57,12 @@ Virtual Machine Management
export LIBVIRT_DEFAULT_URI='qemu+ssh://user@server/system'
```
Or for Truenas
```bash
export LIBVIRT_DEFAULT_URI='qemu+ssh://root@truenas/system?socket=/run/truenas_libvirt/libvirt-sock'
```
## Useful Virsh Commands
```bash
@@ -234,7 +242,7 @@ virt-install \
--import --disk "path=${VM_DISK_PATH},bus=virtio"
```
#### Create a Cloud Init Compatible VM
### Create a Cloud Init Compatible VM
<https://cloudinit.readthedocs.io/en/latest/reference/examples.html>
@@ -318,10 +326,93 @@ virt-install \
--disk none
```
### Create a Home Assistant VM
```bash
virt-install \
--name haos \
--description "Home Assistant OS" \
--os-variant=generic \
--ram=4096 \
--vcpus=2 \
--disk /var/lib/libvirt/images/haos_ova-16.3.qcow2,bus=scsi \
--controller type=scsi,model=virtio-scsi \
--import \
--graphics none \
--boot uefi
```
## Snapshots
See [qemu qcow2 snapshots](/active/software_qemu/qemu.md#qcow2-snapshots)
## Creating and Attaching Disks
To create and attach one disk:
```bash
export VM_NAME="cloud-init-test-fedora"
export VM_DISK_NAME="test1"
qemu-img create -f qcow2 /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 1G
virsh attach-disk ${VM_NAME} \
--source /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 \
--target vdb \
--subdriver qcow2 \
--persistent \
--live
```
To create and attach multiple disks (for raid testing)
```bash
export VM_NAME="cloud-init-test-fedora"
# Max supported for this script is 25
export VM_NUM_DISKS=8
export VM_DISK_SIZE=4G
##### Attach #####
# Create the disks and target mounts from our array
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
VM_DISK_NAME="test-${disk_num}"
VM_DISK_TARGET=vd${letters[$disk_num]}
echo "Creating /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2"
sudo qemu-img create -f qcow2 /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 ${VM_DISK_SIZE}
echo "Attaching vd${letters[$disk_num]} to ${VM_NAME}"
virsh attach-disk ${VM_NAME} \
--source /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 \
--target vd${letters[$disk_num]} \
--persistent \
--subdriver qcow2 \
--live
done;
##### Cleanup #####
# Detach the disks from our VMs
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
VM_DISK_NAME="test-${disk_num}"
VM_DISK_TARGET=vd${letters[$disk_num]}
echo "Detaching vd${letters[$disk_num]} from ${VM_NAME}"
virsh detach-disk ${VM_NAME} \
--target vd${letters[$disk_num]} \
--persistent
done;
# Optionally delete images
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
VM_DISK_NAME="test-${disk_num}"
VM_DISK_TARGET=vd${letters[$disk_num]}
echo "Removing /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2"
sudo rm /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2
done;
```
## Virt Builder
<https://docs.fedoraproject.org/en-US/fedora-server/virtualization/vm-install-diskimg-virtbuilder/#_minimal_effort_customization>