Compare commits
11 Commits
380d8f8e48...main

| SHA1 |
|---|
| d4571c9b70 |
| 4c0a263d50 |
| 70259d9542 |
| 4f3102a2ff |
| ed65f8924d |
| 737a58a13c |
| a2cef18efe |
| 1c245a593a |
| b65ef9cbb7 |
| ea3e8f9c10 |
| b5aecf1565 |

active/device_framework_desktop/framework_desktop.md (Normal file, 297 lines)
@@ -0,0 +1,297 @@
# Framework Desktop

- [Framework Desktop](#framework-desktop)
  - [BIOS](#bios)
  - [References](#references)
  - [Notes](#notes)
    - [Volume Locations](#volume-locations)
  - [Setup](#setup)
    - [Create the AI user](#create-the-ai-user)
    - [Helper aliases](#helper-aliases)
    - [Create the models dir](#create-the-models-dir)
    - [Install the Hugging Face CLI](#install-the-hugging-face-cli)
    - [Download models](#download-models)
      - [Text models](#text-models)
      - [Image models](#image-models)
    - [Create the systemd-ai pod](#create-the-systemd-ai-pod)
  - [llama.cpp](#llamacpp)
  - [stable-diffusion.cpp](#stable-diffusioncpp)
  - [open-webui](#open-webui)
  - [Install the whole thing with quadlets (TM)](#install-the-whole-thing-with-quadlets-tm)
    - [Install the update script](#install-the-update-script)

## BIOS

<https://knowledgebase.frame.work/en_us/changing-memory-allocation-amd-ryzen-ai-max-300-series-By1LG5Yrll>

1. Set GPU memory to 512MB

## References

<https://docs.podman.io/en/latest/markdown/podman-systemd.unit.5.html>

## Notes

### Volume Locations

`~/.local/share/containers/storage/volumes/`
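
For example, to see where a named volume actually lives on disk (a quick sketch; the `open-webui-data` volume name comes from the open-webui quadlet below):

```bash
# Print the on-disk mountpoint of a named volume (run as the ai user)
podman volume inspect open-webui-data --format '{{ .Mountpoint }}'
```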

## Setup

### Create the AI user

```bash
# Create your local ai user. This will be the user you launch podman processes from.
useradd -m ai
loginctl enable-linger ai
su -l ai
mkdir -p ~/.config/containers/systemd/
```

Models are big. You'll want some tools to help find large files quickly when space runs out.

### Helper aliases

Add these to your `.bashrc`:

```bash
# Calculate all folder sizes in current dir
alias {dudir,dud}='du -h --max-depth 1 | sort -h'

# Calculate all file sizes in current dir
alias {dufile,duf}='ls -lhSr'

# Restart llama-server / follow logs
alias llama-reload="systemctl --user daemon-reload && systemctl --user restart llama-server.service"
alias llama-logs="journalctl --user -fu llama-server"

# Restart stable diffusion gen and edit server / follow logs
alias sd-gen-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-gen-server'
alias sd-gen-logs='journalctl --user -xeu stable-diffusion-gen-server'
alias sd-edit-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-edit-server'
alias sd-edit-logs='journalctl --user -xeu stable-diffusion-edit-server'
```

### Create the models dir

```bash
mkdir -p /home/ai/models/{text,image,video}
```

### Install the Hugging Face CLI

<https://huggingface.co/docs/huggingface_hub/en/guides/cli#getting-started>

```bash
# Install
curl -LsSf https://hf.co/cli/install.sh | bash

# Login
hf auth login
```

### Download models

#### Text models

<https://huggingface.co/ggml-org/collections>

```bash
# gpt-oss-120b
mkdir /home/ai/models/text/gpt-oss-120b
hf download --local-dir /home/ai/models/text/gpt-oss-120b ggml-org/gpt-oss-120b-GGUF

# gpt-oss-20b
mkdir /home/ai/models/text/gpt-oss-20b
hf download --local-dir /home/ai/models/text/gpt-oss-20b ggml-org/gpt-oss-20b-GGUF

# devstral-2-123b
mkdir /home/ai/models/text/devstral-2-123b
hf download --local-dir /home/ai/models/text/devstral-2-123b unsloth/Devstral-2-123B-Instruct-2512-GGUF Q4_K_M/Devstral-2-123B-Instruct-2512-Q4_K_M-00001-of-00002.gguf
hf download --local-dir /home/ai/models/text/devstral-2-123b unsloth/Devstral-2-123B-Instruct-2512-GGUF Q4_K_M/Devstral-2-123B-Instruct-2512-Q4_K_M-00002-of-00002.gguf

# devstral-small-2-24b
mkdir /home/ai/models/text/devstral-small-2-24b
hf download --local-dir /home/ai/models/text/devstral-small-2-24b unsloth/Devstral-Small-2-24B-Instruct-2512-GGUF Devstral-Small-2-24B-Instruct-2512-Q4_K_M.gguf

# ministral-3-14b
mkdir /home/ai/models/text/ministral-3-14b
hf download --local-dir /home/ai/models/text/ministral-3-14b ggml-org/Ministral-3-14B-Reasoning-2512-GGUF

# ministral-3-3b-instruct
mkdir /home/ai/models/text/ministral-3-3b-instruct
hf download --local-dir /home/ai/models/text/ministral-3-3b-instruct ggml-org/Ministral-3-3B-Instruct-2512-GGUF

# nemotron-nano-30b
mkdir /home/ai/models/text/nemotron-nano-30b
hf download --local-dir /home/ai/models/text/nemotron-nano-30b ggml-org/Nemotron-Nano-3-30B-A3B-GGUF Nemotron-Nano-3-30B-A3B-Q4_K_M.gguf

# qwen3-30b-a3b-instruct
mkdir /home/ai/models/text/qwen3-30b-a3b-instruct
hf download --local-dir /home/ai/models/text/qwen3-30b-a3b-instruct ggml-org/Qwen3-30B-A3B-Instruct-2507-Q8_0-GGUF

# qwen3-coder-30b-a3b-instruct
mkdir /home/ai/models/text/qwen3-coder-30b-a3b-instruct
hf download --local-dir /home/ai/models/text/qwen3-coder-30b-a3b-instruct ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF
```

#### Image models

```bash
# z-turbo
mkdir /home/ai/models/image/z-turbo
hf download --local-dir /home/ai/models/image/z-turbo leejet/Z-Image-Turbo-GGUF z_image_turbo-Q4_K.gguf
hf download --local-dir /home/ai/models/image/z-turbo black-forest-labs/FLUX.1-schnell ae.safetensors
hf download --local-dir /home/ai/models/image/z-turbo unsloth/Qwen3-4B-Instruct-2507-GGUF Qwen3-4B-Instruct-2507-Q4_K_M.gguf

# flux-1-kontext
mkdir /home/ai/models/image/flux-1-kontext
hf download --local-dir /home/ai/models/image/flux-1-kontext QuantStack/FLUX.1-Kontext-dev-GGUF flux1-kontext-dev-Q4_K_M.gguf
hf download --local-dir /home/ai/models/image/flux-1-kontext black-forest-labs/FLUX.1-dev ae.safetensors
hf download --local-dir /home/ai/models/image/flux-1-kontext comfyanonymous/flux_text_encoders clip_l.safetensors
hf download --local-dir /home/ai/models/image/flux-1-kontext comfyanonymous/flux_text_encoders t5xxl_fp16.safetensors
```

### Create the systemd-ai pod

You'll at least want the ai pod and network. Copy `ai.pod` and `ai.network` out of `quadlets` into `~/.config/containers/systemd`.

Then run `systemctl --user daemon-reload && systemctl --user start ai-pod`.
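
A minimal sketch of that, assuming the quadlet files are already on the machine (e.g. copied over with `scp` as in the quadlets section below):

```bash
cp quadlets/ai.pod quadlets/ai.network ~/.config/containers/systemd/
systemctl --user daemon-reload
systemctl --user start ai-pod
```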

## llama.cpp

<https://github.com/ggml-org/llama.cpp/tree/master/tools/server>

```bash
# Build the llama.cpp container image
git clone https://github.com/ggml-org/llama.cpp.git
cd llama.cpp
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")

# Vulkan
podman build -f .devops/vulkan.Dockerfile -t llama-cpp-vulkan:${BUILD_TAG} -t llama-cpp-vulkan:latest .

# Run llama server (available on port 8000)
# Add `--n-cpu-moe 32` when running gpt-oss-120b to keep only a minimal number of experts on the GPU
podman run \
    --rm \
    --name llama-server-demo \
    --pod systemd-ai \
    --device=/dev/kfd \
    --device=/dev/dri \
    -v /home/ai/models/text:/models:z \
    localhost/llama-cpp-vulkan:latest \
    --port 8000 \
    -c 64000 \
    -b 64000 \
    -ub 500 \
    --perf \
    --n-gpu-layers all \
    --jinja \
    --models-max 1 \
    --models-dir /models
```
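
Once the container is up, a quick sanity check (a sketch; llama.cpp's server exposes a `/health` endpoint and an OpenAI-compatible API):

```bash
# Returns a small JSON status once the model is loaded
curl http://localhost:8000/health

# OpenAI-compatible chat completion against the loaded model
curl http://localhost:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"messages": [{"role": "user", "content": "Say hello"}]}'
```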

## stable-diffusion.cpp

Server: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/server>

CLI: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/cli>

```bash
git clone https://github.com/leejet/stable-diffusion.cpp.git
cd stable-diffusion.cpp
git submodule update --init --recursive
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")

# Vulkan
podman build -f Dockerfile.vulkan -t stable-diffusion-cpp:${BUILD_TAG} -t stable-diffusion-cpp:latest .
```

```bash
# z-turbo
podman run --rm \
    -v /home/ai/models:/models:z \
    -v /home/ai/output:/output:z \
    --device /dev/kfd \
    --device /dev/dri \
    localhost/stable-diffusion-cpp:latest \
    --diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
    --vae /models/image/z-turbo/ae.safetensors \
    --llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
    --cfg-scale 1.0 \
    -v \
    -H 1024 \
    -W 1024 \
    --seed -1 \
    --steps 8 \
    --vae-conv-direct \
    -o /output/output.png \
    -p "A photorealistic dragon"

# Edit with flux kontext
podman run --rm \
    -v /home/ai/models:/models:z \
    -v /home/ai/output:/output:z \
    --device /dev/kfd \
    --device /dev/dri \
    localhost/stable-diffusion-cpp:latest \
    --diffusion-model /models/image/flux-1-kontext/flux1-kontext-dev-Q4_K_M.gguf \
    --vae /models/image/flux-1-kontext/ae.safetensors \
    --clip_l /models/image/flux-1-kontext/clip_l.safetensors \
    --t5xxl /models/image/flux-1-kontext/t5xxl_fp16.safetensors \
    --cfg-scale 1.0 \
    --sampling-method euler \
    --seed -1 \
    --steps 28 \
    --vae-conv-direct \
    -v \
    -H 512 \
    -W 512 \
    -o /output/output.png \
    -r /output/everquest_logo.png \
    -p "Add the text 'EverQuest'"
```

## open-webui

```bash
mkdir /home/ai/.env
# Create a file called open-webui-env containing WEBUI_SECRET_KEY="some-random-key"
scp active/device_framework_desktop/secrets/open-webui-env deskwork-ai:.env/

# Will be available on port 8080
podman run \
    -d \
    --pod systemd-ai \
    -v open-webui:/app/backend/data \
    --name open-webui \
    --restart always \
    ghcr.io/open-webui/open-webui:main
```
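
One way to generate a value for `WEBUI_SECRET_KEY` (a sketch; any sufficiently random string works):

```bash
# Write the env file with a random secret before copying it over
echo "WEBUI_SECRET_KEY=\"$(openssl rand -hex 32)\"" > active/device_framework_desktop/secrets/open-webui-env
```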

## Install the whole thing with quadlets (TM)

```bash
# Installs and runs all services in `quadlets/`
scp -r active/device_framework_desktop/quadlets/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-pod.service
```

Note that all services will be available at `host.containers.internal`, so llama.cpp will be up at `http://host.containers.internal:8000`.
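
A quick way to confirm that from inside the pod (a sketch; assumes the quadlet pod is named `systemd-ai` and pulls the `curlimages/curl` image):

```bash
# From a throwaway container in the same pod, hit the llama.cpp health endpoint
podman run --rm --pod systemd-ai docker.io/curlimages/curl:latest \
    -s http://host.containers.internal:8000/health
```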

### Install the update script

```bash
# 1. Builds the latest llama.cpp and stable-diffusion.cpp
# 2. Pulls the latest open-webui
# 3. Restarts all services
scp active/device_framework_desktop/update-script.sh deskwork-ai:
ssh deskwork-ai
chmod +x update-script.sh
./update-script.sh
```

active/device_framework_desktop/quadlets/ai.network (Normal file, 2 lines)
@@ -0,0 +1,2 @@
[Network]
IPv6=true

active/device_framework_desktop/quadlets/ai.pod (Normal file, 8 lines)
@@ -0,0 +1,8 @@
[Pod]
Network=ai.network
# llama.cpp
PublishPort=8000:8000/tcp
# open-webui
PublishPort=8080:8080/tcp
# stable-diffusion.cpp
PublishPort=1234:1234/tcp

@@ -0,0 +1,44 @@
[Unit]
Description=A Llama CPP Server Running GPT OSS 120b

[Container]
# Shared AI pod
Pod=ai.pod

# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest

# Downloaded models volume
Volume=/home/ai/models/text:/models:z

# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri

# Server command
Exec=--port 8000 \
    -c 48000 \
    -b 48000 \
    -ub 500 \
    --perf \
    --n-gpu-layers all \
    --jinja \
    --models-max 1 \
    --models-dir /models

# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8000/props?model=gpt-oss-120b || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

@@ -0,0 +1,24 @@
[Unit]
Description=An Open Webui Frontend for Local AI Services

[Container]
# Shared AI pod
Pod=ai.pod

# Open Webui base image
Image=ghcr.io/open-webui/open-webui:main

# Nothing too complicated here. Open Webui will basically configure itself.
Volume=open-webui-data:/app/backend/data

# WEBUI_SECRET_KEY is required to prevent logout on restart
EnvironmentFile=/home/ai/.env/open-webui-env

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

@@ -0,0 +1,42 @@
[Unit]
Description=A Stable Diffusion CPP Server for Editing Images

[Container]
# Shared AI pod
Pod=ai.pod

# Vulkan image for AMD GPU
Image=localhost/stable-diffusion-cpp:latest

# Shared models directory
Volume=/home/ai/models:/models:z

# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri

# Override entrypoint to use server
Entrypoint=/sd-server

# Server args
Exec=-l 0.0.0.0 \
    --listen-port 1235 \
    --diffusion-model /models/image/flux-1-kontext/flux1-kontext-dev-Q4_K_M.gguf \
    --vae /models/image/flux-1-kontext/ae.safetensors \
    --clip_l /models/image/flux-1-kontext/clip_l.safetensors \
    --t5xxl /models/image/flux-1-kontext/t5xxl_fp16.safetensors \
    --cfg-scale 1.0 \
    --sampling-method euler \
    --vae-conv-direct \
    --seed -1 \
    --steps 28 \
    -v

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

@@ -0,0 +1,42 @@
[Unit]
Description=A Stable Diffusion CPP Server for Generating Images

[Container]
# Shared AI pod
Pod=ai.pod

# Vulkan image for AMD GPU
Image=localhost/stable-diffusion-cpp:latest

# Shared models directory
Volume=/home/ai/models:/models:z

# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri

# Override entrypoint to use server
Entrypoint=/sd-server

# Server args
Exec=-l 0.0.0.0 \
    --listen-port 1234 \
    --diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
    --vae /models/image/z-turbo/ae.safetensors \
    --llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
    --cfg-scale 1.0 \
    --vae-conv-direct \
    -v \
    --seed -1 \
    --steps 8

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

active/device_framework_desktop/update-script.sh (Normal file, 20 lines)
@@ -0,0 +1,20 @@
#!/bin/bash

set -x

export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")

echo "Updates stable-diffusion.cpp, llama.cpp, and open-webui"

cd /home/ai/llama.cpp
git pull
podman build -t llama-cpp-vulkan:${BUILD_TAG} -t llama-cpp-vulkan:latest -f .devops/vulkan.Dockerfile .

cd /home/ai/stable-diffusion.cpp
git pull
git submodule update --init --recursive
podman build -f Dockerfile.vulkan -t stable-diffusion-cpp:${BUILD_TAG} -t stable-diffusion-cpp:latest .

podman image pull ghcr.io/open-webui/open-webui:main

systemctl --user restart ai-pod

active/software_btrfs/btrfs.md (Normal file, 217 lines)
@@ -0,0 +1,217 @@
# BTRFS

- [BTRFS](#btrfs)
  - [Creating an Array](#creating-an-array)
  - [Mounting the Array](#mounting-the-array)
  - [Adding Disks](#adding-disks)
  - [Replacing a Disk](#replacing-a-disk)
  - [Scrubbing the Array](#scrubbing-the-array)
  - [Creating Subvolumes](#creating-subvolumes)
  - [Monitoring Usage](#monitoring-usage)
  - [Encrypting BTRFS with LUKS](#encrypting-btrfs-with-luks)
  - [Monitoring Disk Health](#monitoring-disk-health)
  - [Defragmenting and Compressing](#defragmenting-and-compressing)

Oracle [has decent docs here](https://docs.oracle.com/en/operating-systems/oracle-linux/8/btrfs/btrfs-ResizingaBtrfsFileSystem.html)

You'll also want to [read about btrfs compression](https://thelinuxcode.com/enable-btrfs-filesystem-compression/)

## Creating an Array

```bash
# At any point you can check the status of an array by referencing any member
btrfs filesystem show /dev/vdb
```

```bash
# Raid0
mkfs.btrfs --data raid0 --metadata raid0 /dev/vdb /dev/vdc
btrfs device scan

# Raid1
mkfs.btrfs --data raid1 --metadata raid1 /dev/vdb /dev/vdc
btrfs device scan

# Raid1c3
mkfs.btrfs --data raid1c3 --metadata raid1c3 /dev/vdb /dev/vdc /dev/vdd
btrfs device scan

# Raid10
mkfs.btrfs --data raid10 --metadata raid10 /dev/vdb /dev/vdc /dev/vdd /dev/vde
btrfs device scan

# Convert to raid1
# -dconvert == "data convert"
# -mconvert == "metadata convert"
btrfs balance start -dconvert=raid1 -mconvert=raid1 /btrfs
btrfs balance status /btrfs
```

## Mounting the Array

One off:

```bash
# Create a mount point
mkdir /btrfs

# Mount the top level subvolume
mount /dev/vdb /btrfs -o subvolid=5

# Mount with better SSD support
mount /dev/vdb /btrfs -o subvolid=5,ssd

# Mount with auto defragmentation for HDD support
mount /dev/vdb /btrfs -o subvolid=5,autodefrag

# Mount a subvolume
mount /dev/vdb /btrfs -o subvol=home

# Inspect
btrfs filesystem show /btrfs
```

In fstab:

```conf
UUID=btrfs_uuid /btrfs btrfs defaults 0 0
```
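
If you want compression or a specific subvolume mounted at boot, a sketch of a fuller entry (assumes a subvolume named `home` and zstd compression, per the compression article linked above):

```conf
UUID=btrfs_uuid /home btrfs defaults,subvol=home,compress=zstd:3 0 0
```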

## Adding Disks

```bash
# Add a disk
btrfs device add /dev/vdd /btrfs

# Watch the expansion
btrfs filesystem usage /btrfs
```

## Replacing a Disk

```bash
# Remove a disk from the array
btrfs device delete /dev/vdb /btrfs

# Add the new device
btrfs device add /dev/vdg /btrfs
```

## Scrubbing the Array

```bash
# Start a scrub to check for errors
# -B prevents the process from going to the background
# -d prints stats for each device
btrfs scrub start -Bd /btrfs

# Check the status of a scrub
btrfs scrub status /btrfs

# Watch for disk failures
dmesg | grep btrfs
```

## Creating Subvolumes

```bash
# Create a new subvolume (make sure to mount /btrfs as subvolid=5)
btrfs subvolume create /btrfs/foo

# List all subvolumes under a path
btrfs subvolume list -t /btrfs

# Delete a subvolume
btrfs subvolume delete /btrfs/foo
```

## Monitoring Usage

```bash
# Quick info for all btrfs arrays
btrfs filesystem show

# Show usage for a specific array
btrfs filesystem usage /btrfs

# Quick command to filter for data used
btrfs filesystem usage /btrfs | grep 'Data.*Used'
```

## Encrypting BTRFS with LUKS

```bash
export KEYFILE_PATH=/root/btrfs.keyfile
export LUKS_DEVS="sdb sdc sdd sde sdf sdg sdh"

# Create a key file
dd if=/dev/urandom of=${KEYFILE_PATH} bs=128 count=1
chmod 400 ${KEYFILE_PATH}

# Create partitions
for luks_dev in $LUKS_DEVS; do
    echo Creating partition for /dev/$luks_dev
    parted -s -a optimal -- /dev/$luks_dev mklabel gpt mkpart primary 1MiB 100%
done

# Check that your list is good
for luks_dev in $LUKS_DEVS; do
    echo will encrypt /dev/${luks_dev}1 and create /dev/mapper/luks-$(lsblk -n -o PARTUUID /dev/${luks_dev}1)
done

# Create the luks partitions
# --iter-time 10000 is how long, in milliseconds, key derivation takes
# -v is verbose
# -q is "batch mode", don't ask for confirmation
# Longer makes it harder to brute-force
for luks_dev in $LUKS_DEVS; do
    LUKS_UUID=$(lsblk -n -o PARTUUID /dev/${luks_dev}1)
    LUKS_NAME=luks-${LUKS_UUID}
    echo "Encrypting /dev/${luks_dev}1"
    cryptsetup luksFormat -v -q --iter-time 10000 --key-file ${KEYFILE_PATH} /dev/${luks_dev}1
    echo "Unlocking /dev/${luks_dev}1 as ${LUKS_NAME}"
    cryptsetup open /dev/${luks_dev}1 ${LUKS_NAME} --key-file=${KEYFILE_PATH}
    echo "Adding ${LUKS_NAME} UUID=${LUKS_UUID} none discard to crypttab"
    echo "${LUKS_NAME} UUID=${LUKS_UUID} none discard" >> /etc/crypttab
done

# List filesystems with UUID
lsblk --fs

# Now create the array using the /dev/mapper entries from above
mkfs.btrfs --data raid1 --metadata raid1 /dev/mapper/luks-<partuuid-1> /dev/mapper/luks-<partuuid-2> ...
btrfs device scan
```

## Monitoring Disk Health

<https://www.dotlinux.net/blog/how-to-configure-smartd-and-be-notified-of-hard-disk-problems-via-email/#installing-smartd-smartmontools>

```bash
# btrfs device stats shows any errors
# Grep for any line not ending in "0"
btrfs device stats /mnt | grep -vE ' 0$'

# Show the device IDs for the mounted filesystem
btrfs filesystem show /mnt

# Delete a device (with ID 8, for example)
btrfs device delete 8 /mnt

# Add a device to the array
btrfs device add /dev/vdi1 /mnt

# Rebalance the array
btrfs balance start /mnt
```
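
For the SMART side of disk health (per the smartd link above), a couple of starter commands (a sketch; assumes `smartmontools` is installed):

```bash
# One-shot health check of a member disk
smartctl -H /dev/sdb

# Run the smartd monitoring daemon
systemctl enable smartd --now
```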

## Defragmenting and Compressing

```bash
# Defrag a filesystem
btrfs filesystem defragment /mnt

# Defrag and apply compression
# zstd:20 is currently the best compression algorithm
btrfs filesystem defragment -c zstd:20 /mnt
```

active/software_luks/luks.md (Normal file, 91 lines)
@@ -0,0 +1,91 @@
# LUKS

Disk Encryption

## Encrypting a Drive

You get 8 key slots with LUKS1; LUKS2 allows up to 32.

```bash
# Remember to install if you need it
dnf install cryptsetup

# Create an encryption key
mkdir /etc/luks-keys
chmod 700 /etc/luks-keys
dd if=/dev/urandom bs=128 count=1 of=/etc/luks-keys/data0.key

# Create an encrypted partition
# -q means don't ask for confirmation
# -v means verbose
cryptsetup -q -v luksFormat /dev/nvme6n1p1 /etc/luks-keys/data0.key

# Unlock
cryptsetup -q -v luksOpen --key-file /etc/luks-keys/data0.key /dev/nvme6n1p1 luks-$(cryptsetup luksUUID /dev/nvme6n1p1)

# List keys
cryptsetup luksDump /dev/nvme6n1p1

# Remove a key from a slot
cryptsetup luksKillSlot /dev/nvme6n1p1 2

# Add a new key to a slot
cryptsetup luksAddKey /dev/nvme6n1p1 -S 5
```

## TPM2 Decryption

Mostly taken from here:
<https://gist.github.com/jdoss/777e8b52c8d88eb87467935769c98a95>

PCR reference for `--tpm2-pcrs` args:

```text
0: System firmware executable
2: Pluggable/external executable code (e.g. option ROMs)
4: Bootloader
7: Secure boot state
8: Cmdline
9: Initrd
```

Basic commands:

```bash
# Show tpm2 devices
systemd-cryptenroll --tpm2-device=list

# Show crypto luks block devices
blkid -t TYPE=crypto_LUKS

# Enroll the tpm2 device with systemd-cryptenroll
systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=0,2,4,7,8,9 /dev/nvme0n1p3

# Reenroll
systemd-cryptenroll /dev/nvme0n1p3 --wipe-slot=tpm2 --tpm2-device=auto --tpm2-pcrs=0,2,4,7,8,9
```

Note that you'll need to add `rd.luks.options=tpm2-device=auto` to your kernel parameters.
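
On Fedora-style systems, one way to do that (a sketch; `grubby` edits the kernel command line for installed kernels):

```bash
grubby --update-kernel=ALL --args="rd.luks.options=tpm2-device=auto"
```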

## Batch Operations

```bash
# Create encrypted drives in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_dev in $LUKS_DEVS; do
    cryptsetup -q -v luksFormat ${luks_dev} /etc/luks-keys/data0.key
done

# Unlock encrypted drives in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_dev in $LUKS_DEVS; do
    cryptsetup -q -v luksOpen --key-file /etc/luks-keys/data0.key ${luks_dev} luks-$(cryptsetup luksUUID ${luks_dev})
done

# Add new keys in bulk
export LUKS_DEVS="/dev/nvme4n1p1 /dev/nvme3n1p1 /dev/nvme0n1p1 /dev/nvme1n1p4 /dev/nvme2n1p1 /dev/nvme5n1p1"
for luks_dev in $LUKS_DEVS; do
    echo Adding key to $luks_dev
    cryptsetup luksAddKey $luks_dev -S 2
done
```

@@ -9,16 +9,11 @@ groups = []
hostname = "f43-base"

[[customizations.disk.partitions]]
type = "btrfs"
minsize = "32 GiB"

[[customizations.disk.partitions.subvolumes]]
name = "root"
type = "plain"
label = "root"
mountpoint = "/"

[[customizations.disk.partitions.subvolumes]]
name = "home"
mountpoint = "/home"
fs_type = "ext4"
minsize = "128 GiB"

[customizations.timezone]
timezone = "America/New_York"

@@ -70,8 +65,8 @@ data = """
[[customizations.files]]
path = "/home/ducoterra/.inputrc"
mode = "0644"
user = "root"
group = "root"
user = "ducoterra"
group = "ducoterra"
data = """
"\\C-h": backward-kill-word
"""

@@ -33,18 +33,21 @@ sudo usermod -aG weldr $USER

# Optional: cockpit dependency
dnf install -y cockpit-composer

# Optional: allow security profiles
dnf install openscap-scanner scap-security-guide
```

## Building Images

1. Create a toml file describing your image

See `fedora-42-base.toml` for an example.
See `fedora42-base.toml` for an example.

2. Push the toml to composer

```bash
composer-cli blueprints push active/software_osbuild/fedora-42-base.toml
composer-cli blueprints push active/software_osbuild/fedora42-base.toml

# List blueprints
composer-cli blueprints list

@@ -57,13 +60,13 @@ dnf install -y cockpit-composer
composer-cli compose types

# Build the image
composer-cli compose start fedora-42-base qcow2
composer-cli compose start fedora42-base qcow2

# Check status
watch composer-cli compose status

# Download logs if error
cd /tmp && composer-cli compose logs f91a12b6-01fd-4f94-91cc-9d5fb68b8129
cd /tmp && composer-cli compose logs 52963ac9-b680-4def-baaf-252845f0e3fe

# Delete failed images
composer-cli compose list failed -j | jq '.[].body.failed.[]?.id' | xargs -I '%' composer-cli compose delete '%'

@@ -79,16 +82,25 @@ dnf install -y cockpit-composer
composer-cli compose list finished

# Download the image
composer-cli compose image --filename /var/lib/libvirt/images/fedora-42-base.qcow2 image-uuid
composer-cli compose image --filename active/software_osbuild/secrets/fedora43-base.qcow2 image-uuid

# Test with qemu
qemu-kvm --name test-fedora-42-base -m 4096 -hda ~/Downloads/fedora-42-base.qcow2
virt-install \
    --name "fedora43-base" \
    --boot uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no \
    --cpu host-passthrough --vcpus sockets=1,cores=8,threads=2 \
    --ram=8192 \
    --os-variant=fedora41 \
    --network bridge:virbr0 \
    --graphics none \
    --console pty,target.type=virtio \
    --import --disk "path=active/software_osbuild/secrets/fedora43-base.qcow2,bus=virtio"
```

### Image Build and Watch One Liner

```bash
composer-cli blueprints push active/software_osbuild/fedora-43-base.toml && \
composer-cli compose start fedora-43-base qcow2 && \
composer-cli blueprints push active/software_osbuild/fedora43-base.toml && \
composer-cli compose start fedora43-base qcow2 && \
watch composer-cli compose status
```

active/software_smb/smb.md (Normal file, 97 lines)
@@ -0,0 +1,97 @@
# SMB

- [SMB](#smb)
  - [Install SMB](#install-smb)
  - [Create SMB User](#create-smb-user)
  - [Create a SMB Share](#create-a-smb-share)
  - [Create a SMB Share with Many Users](#create-a-smb-share-with-many-users)

## Install SMB

```bash
sudo dnf install samba
sudo systemctl enable smb --now
firewall-cmd --get-active-zones
sudo firewall-cmd --permanent --zone=FedoraServer --add-service=samba
sudo firewall-cmd --reload
```

## Create SMB User

```bash
sudo smbpasswd -a ducoterra
```

## Create a SMB Share

```bash
# Create share
mkdir /btrfs/pool0/smb/ducoterra

# Set proper selinux labels for samba
sudo semanage fcontext --add --type "samba_share_t" "/btrfs/pool0/smb/ducoterra(/.*)?"

# Run restorecon at the root of the btrfs subvolume
sudo restorecon -R /btrfs/pool0
```

Edit `/etc/samba/smb.conf`:

```conf
[ducoterra]
    comment = My Share
    path = /btrfs/pool0/smb/ducoterra
    writeable = yes
    browseable = yes
    public = no
    create mask = 0644
    directory mask = 0755
    write list = user
```

Then restart SMB:

```bash
sudo systemctl restart smb
```
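
To check the share from a Linux client, a sketch (assumes `cifs-utils` is installed and the server is reachable as `server`):

```bash
sudo mount -t cifs //server/ducoterra /mnt -o username=ducoterra
```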

## Create a SMB Share with Many Users

```bash
sudo groupadd myfamily
sudo useradd -G myfamily jack
sudo useradd -G myfamily maria

sudo smbpasswd -a jack
sudo smbpasswd -a maria

sudo mkdir /home/share
sudo chgrp myfamily /home/share
sudo chmod 770 /home/share
sudo semanage fcontext --add --type "samba_share_t" "/home/share(/.*)?"
sudo restorecon -R /home/share
```

```conf
[family]
    comment = Family Share
    path = /home/share
    writeable = yes
    browseable = yes
    public = yes
    valid users = @myfamily
    create mask = 0660
    directory mask = 0770
    force group = +myfamily
```

- valid users: only users of the myfamily group have access rights. The @ denotes a group name.
- force group = +myfamily: files and directories are created with this group instead of the user's primary group.
- create mask = 0660: files in the share are created with permissions that let all group members read and write files created by other users.
- directory mask = 0770: as before, but for directories.

Don't forget to restart smb:

```bash
systemctl restart smb
```
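
To verify the group share from a client, a sketch (assumes `samba-client` is installed and the server is reachable as `server`):

```bash
smbclient -U jack //server/family
```
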
@@ -19,7 +19,9 @@ Virtual Machine Management
- [Create a Cloud Init Compatible VM](#create-a-cloud-init-compatible-vm)
- [Create VM with Graphics using an ISO Installation Disk](#create-vm-with-graphics-using-an-iso-installation-disk)
- [Create VM using Host Device as Disk](#create-vm-using-host-device-as-disk)
- [Create a Home Assistant VM](#create-a-home-assistant-vm)
- [Snapshots](#snapshots)
- [Creating and Attaching Disks](#creating-and-attaching-disks)
- [Virt Builder](#virt-builder)

## Before you Begin

@@ -55,6 +57,12 @@ Virtual Machine Management
export LIBVIRT_DEFAULT_URI='qemu+ssh://user@server/system'
```

Or for Truenas

```bash
export LIBVIRT_DEFAULT_URI='qemu+ssh://root@truenas/system?socket=/run/truenas_libvirt/libvirt-sock'
```

## Useful Virsh Commands

```bash

@@ -234,7 +242,7 @@ virt-install \
--import --disk "path=${VM_DISK_PATH},bus=virtio"
```

#### Create a Cloud Init Compatible VM
### Create a Cloud Init Compatible VM

<https://cloudinit.readthedocs.io/en/latest/reference/examples.html>

@@ -318,10 +326,93 @@ virt-install \
--disk none
```

### Create a Home Assistant VM

```bash
virt-install \
    --name haos \
    --description "Home Assistant OS" \
    --os-variant=generic \
    --ram=4096 \
    --vcpus=2 \
    --disk /var/lib/libvirt/images/haos_ova-16.3.qcow2,bus=scsi \
    --controller type=scsi,model=virtio-scsi \
    --import \
    --graphics none \
    --boot uefi
```

## Snapshots

See [qemu qcow2 snapshots](/active/software_qemu/qemu.md#qcow2-snapshots)

## Creating and Attaching Disks

To create and attach one disk:

```bash
export VM_NAME="cloud-init-test-fedora"
export VM_DISK_NAME="test1"
qemu-img create -f qcow2 /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 1G

virsh attach-disk ${VM_NAME} \
    --source /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 \
    --target vdb \
    --persistent \
    --subdriver qcow2 \
    --live
```
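
To confirm the disk is attached (a sketch; `domblklist` lists a domain's disk targets and sources):

```bash
virsh domblklist ${VM_NAME}
```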

To create and attach multiple disks (for raid testing):

```bash
export VM_NAME="cloud-init-test-fedora"
# Max supported for this script is 25
export VM_NUM_DISKS=8
export VM_DISK_SIZE=4G

##### Attach #####
# Create the disks and target mounts from our array
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
    VM_DISK_NAME="test-${disk_num}"
    VM_DISK_TARGET=vd${letters[$disk_num]}

    echo "Creating /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2"
    sudo qemu-img create -f qcow2 /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 ${VM_DISK_SIZE}

    echo "Attaching vd${letters[$disk_num]} to ${VM_NAME}"
    virsh attach-disk ${VM_NAME} \
        --source /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2 \
        --target vd${letters[$disk_num]} \
        --persistent \
        --subdriver qcow2 \
        --live
done;

##### Cleanup #####
# Detach the disks from our VMs
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
    VM_DISK_NAME="test-${disk_num}"
    VM_DISK_TARGET=vd${letters[$disk_num]}

    echo "Detaching vd${letters[$disk_num]} from ${VM_NAME}"
    virsh detach-disk ${VM_NAME} \
        --target vd${letters[$disk_num]} \
        --persistent
done;

# Optionally delete images
letters=($(echo {a..z}))
for disk_num in $(seq 1 $VM_NUM_DISKS); do
    VM_DISK_NAME="test-${disk_num}"
    VM_DISK_TARGET=vd${letters[$disk_num]}

    echo "Removing /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2"
    sudo rm /var/lib/libvirt/images/${VM_DISK_NAME}.qcow2
done;
```

## Virt Builder

<https://docs.fedoraproject.org/en-US/fedora-server/virtualization/vm-install-diskimg-virtbuilder/#_minimal_effort_customization>