Compare commits

...

13 Commits

Author SHA1 Message Date
f70028cf63 init uv project for homelab
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m26s
2026-02-25 12:23:17 -05:00
ecf4ae1058 update templates with new names 2026-02-25 12:22:29 -05:00
eff2aa4066 add keys 2026-02-25 12:21:51 -05:00
a53e67653d fix restorecon command 2026-02-25 12:21:31 -05:00
d48b9a66cb add machinectl to fedora43 base osbuild 2026-02-25 12:21:17 -05:00
2c5af8507c add fedora kernel notes 2026-02-25 12:21:01 -05:00
ba66c47719 move ai notes from framework_desktop to software_ai_stack 2026-02-25 12:20:17 -05:00
da0b06768e add gpu passthrough notes to driveripper 2026-02-25 12:19:46 -05:00
1c6e1b7032 add container_rabbitmq 2026-02-25 12:19:06 -05:00
087d8888cf remove private nginx.conf 2026-02-25 12:18:44 -05:00
cb486ae289 move gitea port to 22 2026-02-25 12:17:39 -05:00
cd56318ab0 add container_elk notes 2026-02-25 12:16:30 -05:00
416321206d add caddy waf docs 2026-02-25 12:15:49 -05:00
52 changed files with 1610 additions and 645 deletions

View File

@@ -1,7 +1,9 @@
FROM docker.io/caddy:2-builder AS builder
RUN xcaddy build \
--with github.com/caddy-dns/route53@v1.6.0
--with github.com/caddy-dns/route53@v1.6.0 \
--with github.com/fabriziosalmi/caddy-waf
FROM docker.io/caddy:2

View File

@@ -6,6 +6,8 @@
- [Ansible](#ansible)
- [Manual](#manual)
- [Adding a new Caddy Record](#adding-a-new-caddy-record)
- [Logs](#logs)
- [Caddy WAF](#caddy-waf)
## Custom Caddy Image
@@ -138,3 +140,66 @@ ddns service:
1. Update the [ddns caddy records](/active/container_ddns/secrets/caddy_records.yaml)
2. (Optional) Update the Caddyfile at `active/container_caddy/secrets/Caddyfile`
3. Run the [caddy ansible playbook](/active/container_caddy/caddy.md#install-caddy)
## Logs
```bash
# Follow remote connections
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host}'
# Filter out noisy hosts
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host} | select(.host != "gitea.reeseapps.com")'
# Focus on user agents
podman logs -f caddy | grep -e '^{' | jq -c '
{
"User-Agent": .request.headers["User-Agent"],
remote_ip: .request.remote_ip,
host: .request.host,
status: .status
}
'
```
## Caddy WAF
<https://github.com/fabriziosalmi/caddy-waf>
1. Copy the rules.json to `/etc/caddy/rules.json`
2. Update the Caddyfile to something like this:
```Caddyfile
gitea.reeseapps.com:443 {
log {
output stdout
format json {
message_key msg # Key for the log message
level_key severity # Key for the log level
time_key timestamp # Key for the timestamp
name_key logger # Key for the logger name
caller_key function # Key for the caller information
stacktrace_key stack # Key for error stacktraces
time_format "2006-01-02 15:04:05 MST" # RFC3339-like format
time_local # Use local timezone
duration_format "ms" # Show durations in milliseconds
level_format "upper" # Uppercase log levels
}
}
route {
waf {
metrics_endpoint /waf_metrics
rule_file rules.json
}
@wafmetrics {
path /waf_metrics
}
handle @wafmetrics { } # empty → let the WAF serve the metrics
handle {
reverse_proxy gitea.reeselink.com:3000
}
}
}
```

View File

@@ -1,38 +1,45 @@
- name: Create Caddy Proxy
hosts: caddy
tasks:
- name: Copy Containerfile for build
template:
src: Containerfile
dest: /etc/caddy/Containerfile
owner: root
group: root
mode: '0644'
- name: Build Caddy Image
shell:
cmd: podman build -t gitea.reeseapps.com/services/caddy:latest -f /etc/caddy/Containerfile
- name: Create /etc/caddy dir
ansible.builtin.file:
path: /etc/caddy
state: directory
mode: '0755'
- name: Copy Caddyfile
template:
src: secrets/proxy.Caddyfile
dest: /etc/caddy/Caddyfile
owner: root
group: root
mode: '0644'
- name: Template Caddy Container Services
template:
src: caddy.container
dest: /etc/containers/systemd/caddy.container
owner: root
group: root
mode: '0644'
- name: Reload and start the Caddy service
ansible.builtin.systemd_service:
state: restarted
name: caddy.service
enabled: true
daemon_reload: true
- name: Copy Containerfile for build
template:
src: Containerfile
dest: /etc/caddy/Containerfile
owner: root
group: root
mode: "0644"
- name: Build Caddy Image
shell:
cmd: podman build -t gitea.reeseapps.com/services/caddy:latest -f /etc/caddy/Containerfile
- name: Create /etc/caddy dir
ansible.builtin.file:
path: /etc/caddy
state: directory
mode: "0755"
- name: Copy Caddyfile
template:
src: secrets/proxy.Caddyfile
dest: /etc/caddy/Caddyfile
owner: root
group: root
mode: "0644"
- name: Copy rules.json
template:
src: rules.json
dest: /etc/caddy/rules.json
owner: root
group: root
mode: "0644"
- name: Template Caddy Container Services
template:
src: caddy.container
dest: /etc/containers/systemd/caddy.container
owner: root
group: root
mode: "0644"
- name: Reload and start the Caddy service
ansible.builtin.systemd_service:
state: restarted
name: caddy.service
enabled: true
daemon_reload: true

View File

@@ -0,0 +1,26 @@
[
{
"id": "block-scanners",
"phase": 1,
"pattern": "(?i)(nikto|sqlmap|nmap|acunetix|nessus|openvas|wpscan|dirbuster|burpsuite|owasp zap|netsparker|appscan|arachni|skipfish|gobuster|wfuzz|hydra|metasploit|nessus|openvas|qualys|zap|w3af|openwebspider|netsparker|appspider|rapid7|nessus|qualys|nuclei|zgrab|vega|gospider|gxspider|whatweb|xspider|joomscan|uniscan|blindelephant)",
"targets": [
"HEADERS:User-Agent"
],
"severity": "CRITICAL",
"action": "block",
"score": 10,
"description": "Block traffic from known vulnerability scanners and penetration testing tools. Includes more scanners."
},
{
"id": "block-crawlers",
"phase": 1,
"pattern": "(meta-externalagent)",
"targets": [
"HEADERS:User-Agent"
],
"severity": "CRITICAL",
"action": "block",
"score": 10,
"description": "Block traffic from web scrapers and crawlers."
}
]

41
active/container_elk/.env Normal file
View File

@@ -0,0 +1,41 @@
# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject
# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=changeme
# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=changeme
# Version of Elastic products
STACK_VERSION=8.7.1
# Set the cluster name
CLUSTER_NAME=docker-cluster
# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial
# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200
# Port to expose Kibana to the host
KIBANA_PORT=5601
# Increase or decrease based on the available host memory (in bytes)
ES_MEM_LIMIT=1073741824
KB_MEM_LIMIT=1073741824
LS_MEM_LIMIT=1073741824
# SAMPLE Predefined Key only to be used in POC environments
ENCRYPTION_KEY=c34d38b3a14956121ff2170e5030b471551370178f43e5626eec58b04a30fae2

View File

@@ -0,0 +1,219 @@
version: "3.8"
volumes:
certs:
driver: local
esdata01:
driver: local
kibanadata:
driver: local
metricbeatdata01:
driver: local
filebeatdata01:
driver: local
logstashdata01:
driver: local
networks:
default:
name: elastic
external: false
services:
setup:
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
volumes:
- certs:/usr/share/elasticsearch/config/certs
user: "0"
command: >
bash -c '
if [ x${ELASTIC_PASSWORD} == x ]; then
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
exit 1;
elif [ x${KIBANA_PASSWORD} == x ]; then
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
exit 1;
fi;
if [ ! -f config/certs/ca.zip ]; then
echo "Creating CA";
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
unzip config/certs/ca.zip -d config/certs;
fi;
if [ ! -f config/certs/certs.zip ]; then
echo "Creating certs";
echo -ne \
"instances:\n"\
" - name: es01\n"\
" dns:\n"\
" - es01\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
" - name: kibana\n"\
" dns:\n"\
" - kibana\n"\
" - localhost\n"\
" ip:\n"\
" - 127.0.0.1\n"\
> config/certs/instances.yml;
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
unzip config/certs/certs.zip -d config/certs;
fi;
echo "Setting file permissions"
chown -R root:root config/certs;
find . -type d -exec chmod 750 \{\} \;;
find . -type f -exec chmod 640 \{\} \;;
echo "Waiting for Elasticsearch availability";
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
echo "Setting kibana_system password";
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
echo "All done!";
'
healthcheck:
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
interval: 1s
timeout: 5s
retries: 120
es01:
depends_on:
setup:
condition: service_healthy
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
labels:
co.elastic.logs/module: elasticsearch
volumes:
- certs:/usr/share/elasticsearch/config/certs
- esdata01:/usr/share/elasticsearch/data
ports:
- ${ES_PORT}:9200
environment:
- node.name=es01
- cluster.name=${CLUSTER_NAME}
- discovery.type=single-node
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- bootstrap.memory_lock=true
- xpack.security.enabled=true
- xpack.security.http.ssl.enabled=true
- xpack.security.http.ssl.key=certs/es01/es01.key
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.enabled=true
- xpack.security.transport.ssl.key=certs/es01/es01.key
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
- xpack.security.transport.ssl.verification_mode=certificate
- xpack.license.self_generated.type=${LICENSE}
mem_limit: ${ES_MEM_LIMIT}
ulimits:
memlock:
soft: -1
hard: -1
healthcheck:
test:
[
"CMD-SHELL",
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
]
interval: 10s
timeout: 10s
retries: 120
kibana:
depends_on:
es01:
condition: service_healthy
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
labels:
co.elastic.logs/module: kibana
volumes:
- certs:/usr/share/kibana/config/certs
- kibanadata:/usr/share/kibana/data
ports:
- ${KIBANA_PORT}:5601
environment:
- SERVERNAME=kibana
- ELASTICSEARCH_HOSTS=https://es01:9200
- ELASTICSEARCH_USERNAME=kibana_system
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
- XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
- XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
- XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}
mem_limit: ${KB_MEM_LIMIT}
healthcheck:
test:
[
"CMD-SHELL",
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
]
interval: 10s
timeout: 10s
retries: 120
metricbeat01:
depends_on:
es01:
condition: service_healthy
kibana:
condition: service_healthy
image: docker.elastic.co/beats/metricbeat:${STACK_VERSION}
user: root
volumes:
- certs:/usr/share/metricbeat/certs
- metricbeatdata01:/usr/share/metricbeat/data
- "./metricbeat.yaml:/usr/share/metricbeat/metricbeat.yml:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro"
- "/proc:/hostfs/proc:ro"
- "/:/hostfs:ro"
environment:
- ELASTIC_USER=elastic
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- ELASTIC_HOSTS=https://es01:9200
- KIBANA_HOSTS=http://kibana:5601
- LOGSTASH_HOSTS=http://logstash01:9600
filebeat01:
depends_on:
es01:
condition: service_healthy
image: docker.elastic.co/beats/filebeat:${STACK_VERSION}
user: root
volumes:
- certs:/usr/share/filebeat/certs
- filebeatdata01:/usr/share/filebeat/data
- "./filebeat_ingest_data/:/usr/share/filebeat/ingest_data/"
- "./filebeat.yaml:/usr/share/filebeat/filebeat.yml:ro"
- "/var/lib/docker/containers:/var/lib/docker/containers:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
environment:
- ELASTIC_USER=elastic
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- ELASTIC_HOSTS=https://es01:9200
- KIBANA_HOSTS=http://kibana:5601
- LOGSTASH_HOSTS=http://logstash01:9600
logstash01:
depends_on:
es01:
condition: service_healthy
kibana:
condition: service_healthy
image: docker.elastic.co/logstash/logstash:${STACK_VERSION}
labels:
co.elastic.logs/module: logstash
user: root
volumes:
- certs:/usr/share/logstash/certs
- logstashdata01:/usr/share/logstash/data
- "./logstash_ingest_data/:/usr/share/logstash/ingest_data/"
- "./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro"
environment:
- xpack.monitoring.enabled=false
- ELASTIC_USER=elastic
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
- ELASTIC_HOSTS=https://es01:9200

View File

@@ -0,0 +1,14 @@
# Elk Stack
## Install
<https://www.elastic.co/blog/getting-started-with-the-elastic-stack-and-docker-compose>
```bash
# Copy over the files
scp -rp active/container_elk/. elk:elk
# SSH into the host
ssh -t elk "cd elk ; bash --login"
# Run the services
docker compose -f elk-compose.yaml up
```

View File

@@ -0,0 +1,29 @@
filebeat.inputs:
- type: filestream
id: default-filestream
paths:
- ingest_data/*.log
filebeat.autodiscover:
providers:
- type: docker
hints.enabled: true
processors:
- add_docker_metadata: ~
setup.kibana:
host: ${KIBANA_HOSTS}
username: ${ELASTIC_USER}
password: ${ELASTIC_PASSWORD}
output.elasticsearch:
hosts: ${ELASTIC_HOSTS}
username: ${ELASTIC_USER}
password: ${ELASTIC_PASSWORD}
ssl.enabled: true
ssl.certificate_authorities: "certs/ca/ca.crt"

View File

@@ -0,0 +1,24 @@
input {
file {
#https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html
#default is TAIL which assumes more data will come into the file.
#change to mode => "read" if the file is a complete file. by default, the file will be removed once reading is complete -- backup your files if you need them.
mode => "tail"
path => "/usr/share/logstash/ingest_data/*"
}
}
filter {
}
output {
elasticsearch {
index => "logstash-%{+YYYY.MM.dd}"
hosts=> "${ELASTIC_HOSTS}"
user=> "${ELASTIC_USER}"
password=> "${ELASTIC_PASSWORD}"
cacert=> "certs/ca/ca.crt"
}
}

View File

@@ -0,0 +1,62 @@
metricbeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
metricbeat.modules:
- module: elasticsearch
xpack.enabled: true
period: 10s
hosts: ${ELASTIC_HOSTS}
ssl.certificate_authorities: "certs/ca/ca.crt"
ssl.certificate: "certs/es01/es01.crt"
ssl.key: "certs/es01/es01.key"
username: ${ELASTIC_USER}
password: ${ELASTIC_PASSWORD}
ssl.enabled: true
- module: logstash
xpack.enabled: true
period: 10s
hosts: ${LOGSTASH_HOSTS}
- module: kibana
metricsets:
- stats
period: 10s
hosts: ${KIBANA_HOSTS}
username: ${ELASTIC_USER}
password: ${ELASTIC_PASSWORD}
xpack.enabled: true
- module: docker
metricsets:
- "container"
- "cpu"
- "diskio"
- "healthcheck"
- "info"
#- "image"
- "memory"
- "network"
hosts: ["unix:///var/run/docker.sock"]
period: 10s
enabled: true
processors:
- add_host_metadata: ~
- add_docker_metadata: ~
output.elasticsearch:
hosts: ${ELASTIC_HOSTS}
username: ${ELASTIC_USER}
password: ${ELASTIC_PASSWORD}
ssl:
certificate: "certs/es01/es01.crt"
certificate_authorities: "certs/ca/ca.crt"
key: "certs/es01/es01.key"

View File

@@ -24,7 +24,7 @@ services:
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
- "22:22"
depends_on:
- db

View File

@@ -21,8 +21,11 @@
Prereqs
1. Mount data dirs at `/srv/gitea-data` and `/srv/gitea-db`
2. Create a gitea user and update gitea-compose.yaml with the correct UID
1. Change the default SSH port for your server to 2022 (or something similar).
2. Allow SSH to bind to that port: `semanage port -a -t ssh_port_t -p tcp 2022`
3. Allow 2022 on the firewall: `firewall-cmd --add-port=2022/tcp --permanent && firewall-cmd --reload`
4. Mount data dirs at `/srv/gitea-data` and `/srv/gitea-db`
5. Create a gitea user and update gitea-compose.yaml with the correct UID
```bash
scp active/container_gitea/gitea-compose.yaml gitea:

View File

@@ -1,46 +0,0 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
stream {
log_format stream_logs '$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time "$upstream_addr"';
access_log /dev/stdout stream_logs;
error_log stderr info;
server {
listen 3478;
listen [::]:3478;
proxy_pass nextcloud.reeselink.com:3478;
}
server {
listen 2222;
listen [::]:2222;
proxy_pass gitea.reeselink.com:2222;
}
server {
listen 8080;
listen [::]:8080;
proxy_pass unifi-external.reeselink.com:2222;
}
server {
listen 25565;
listen [::]:25565;
proxy_pass minecraft.reeselink.com:25565;
}
server {
listen 25566;
listen [::]:25566;
proxy_pass minecraft.reeselink.com:25566;
}
}

View File

@@ -2,6 +2,35 @@
## Initial Install
Create your initial `secrets/nginx.conf` to look something like:
```conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
stream {
log_format stream_logs '$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time "$upstream_addr"';
access_log /dev/stdout stream_logs;
error_log stderr info;
server {
listen 25565;
listen [::]:25565;
proxy_pass my-minecraft-server.internal.dns:25565;
}
}
```
Create the systemd service:
```bash
# Get the initial configuration
vim /etc/containers/systemd/nginx.container
@@ -26,11 +55,27 @@ Restart=always
WantedBy=default.target
```
## Update the Configuration
Reload the service and start it:
```bash
scp active/container_nginx/nginx.conf proxy:/etc/nginx/nginx.conf
ssh proxy
systemctl daemon-reload
systemctl start nginx
```
## Update the Configuration
```bash
scp active/container_nginx/secrets/nginx.conf proxy:/etc/nginx/nginx.conf
ssh proxy
systemctl restart nginx
```
## Logs
```bash
# Watch client connections
journalctl -u nginx -f | grep -e 'client .* connected'
# Watch upstream proxy connections
journalctl -u nginx -f | grep -e 'proxy .* connected'
```

View File

@@ -0,0 +1,11 @@
services:
rabbitmq:
container_name: rabbitmq
restart: always
image: docker.io/rabbitmq:3-management
ports:
- 15672:15672
- 5672:5672
env_file: /home/rabbitmq/rabbitmq.env
volumes:
- /home/rabbitmq/data:/var/lib/rabbitmq

View File

@@ -0,0 +1,2 @@
RABBITMQ_DEFAULT_USER=user
RABBITMQ_DEFAULT_PASS=password

View File

@@ -0,0 +1,196 @@
# Podman rabbitmq
- [Podman rabbitmq](#podman-rabbitmq)
- [Setup rabbitmq Project](#setup-rabbitmq-project)
- [Install rabbitmq](#install-rabbitmq)
- [Create the rabbitmq user](#create-the-rabbitmq-user)
- [Generate the rabbitmq tls certs](#generate-the-rabbitmq-tls-certs)
- [Write the rabbitmq compose spec](#write-the-rabbitmq-compose-spec)
- [A Note on Volumes](#a-note-on-volumes)
- [Convert rabbitmq compose spec to quadlets](#convert-rabbitmq-compose-spec-to-quadlets)
- [Create any container-mounted directories](#create-any-container-mounted-directories)
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
- [Alias rabbitmqctl](#alias-rabbitmqctl)
- [Expose rabbitmq](#expose-rabbitmq)
- [firewalld](#firewalld)
- [Backup rabbitmq](#backup-rabbitmq)
- [Upgrade rabbitmq](#upgrade-rabbitmq)
- [Upgrade Quadlets](#upgrade-quadlets)
- [Uninstall](#uninstall)
- [Notes](#notes)
- [SELinux](#selinux)
## Setup rabbitmq Project
- [x] Copy and rename this folder to active/container_rabbitmq
- [x] Find and replace rabbitmq with the name of the service.
- [ ] Create the rootless user to run the podman containers
- [ ] Write the compose.yaml spec for your service
- [ ] Convert the compose.yaml spec to a quadlet
- [ ] Install the quadlet on the podman server
- [ ] Expose the quadlet service
- [ ] Install a backup service and timer
## Install rabbitmq
### Create the rabbitmq user
```bash
# SSH into your podman server as root
useradd rabbitmq
loginctl enable-linger $(id -u rabbitmq)
systemctl --user --machine=rabbitmq@.host enable podman-restart
systemctl --user --machine=rabbitmq@.host enable --now podman.socket
mkdir -p /home/rabbitmq/.config/containers/systemd
```
### Generate the rabbitmq tls certs
We'll use tls authentication to ensure encryption between our servers and clients.
<https://www.rabbitmq.com/docs/ssl#automated-certificate-generation-transcript>
```bash
ssh rabbitmq
git clone https://github.com/rabbitmq/tls-gen tls-gen
cd tls-gen/basic
# private key password
make PASSWORD=bunnies
make verify
make info
ls -l ./result
```
### Write the rabbitmq compose spec
Edit the compose.yaml at active/container_rabbitmq/compose/compose.yaml
#### A Note on Volumes
Named volumes are stored at `/home/rabbitmq/.local/share/containers/storage/volumes/`.
### Convert rabbitmq compose spec to quadlets
Run the following to convert a compose.yaml into the various `.container` files for systemd:
```bash
# Generate the systemd service
podman run \
--security-opt label=disable \
--rm \
-v $(pwd)/active/container_rabbitmq/compose:/compose \
-v $(pwd)/active/container_rabbitmq/quadlets:/quadlets \
quay.io/k9withabone/podlet \
-f /quadlets \
-i \
--overwrite \
compose /compose/compose.yaml
# Copy the files to the server
export PODMAN_SERVER=rabbitmq
scp -r active/container_rabbitmq/quadlets/. $PODMAN_SERVER:/home/rabbitmq/.config/containers/systemd/
ssh $PODMAN_SERVER chown -R rabbitmq:rabbitmq /home/rabbitmq/.config/containers/systemd/
```
### Create any container-mounted directories
SSH into your podman server as root:
```bash
machinectl shell rabbitmq@
podman unshare
# /var/lib/rabbitmq
mkdir data
# Chown to the namespaced user with UID 1000
# This will be some really obscure UID outside the namespace
# This will also solve most permission denied errors
chown -R 1000:1000 some_volume
```
### Start and enable your systemd quadlet
SSH into your podman server as root:
```bash
machinectl shell rabbitmq@
systemctl --user daemon-reload
systemctl --user restart rabbitmq
# Enable auto-update service which will pull new container images automatically every day
systemctl --user enable --now podman-auto-update.timer
```
### Alias rabbitmqctl
We'll use containers to run rabbitmqctl, so we'll add an alias to our `.bashrc`
to make things easier:
```bash
alias rabbitmqctl='podman exec -it rabbitmq rabbitmqctl'
```
### Expose rabbitmq
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
3. Finally, follow your OS's guide for opening ports via its firewall service.
#### firewalld
```bash
# command to get current active zone and default zone
firewall-cmd --get-active-zones
firewall-cmd --get-default-zone
# command to open 443 on tcp
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
# command to open 80 and 443 on tcp and udp
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
# command to list available services and then open http and https
firewall-cmd --get-services
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
```
## Backup rabbitmq
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
## Upgrade rabbitmq
### Upgrade Quadlets
Upgrades should be a repeat of [writing the compose spec](#convert-rabbitmq-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
```bash
export PODMAN_SERVER=
scp -r quadlets/. $PODMAN_SERVER:/home/rabbitmq/.config/containers/systemd/
ssh rabbitmq systemctl --user daemon-reload
ssh rabbitmq systemctl --user restart rabbitmq
```
## Uninstall
```bash
# Stop the user's services
systemctl --user disable podman-restart
podman container stop --all
systemctl --user disable --now podman.socket
systemctl --user disable --now podman-auto-update.timer
# Delete the user (this won't delete their home directory)
# userdel might spit out an error like:
# userdel: user rabbitmq is currently used by process 591255
# kill those processes and try again
userdel rabbitmq
```
## Notes
### SELinux
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
:z allows a container to share a mounted volume with all other containers.
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.

View File

@@ -0,0 +1,12 @@
[Container]
ContainerName=rabbitmq
EnvironmentFile=/srv/rabbitmq/rabbitmq.env
Image=docker.io/rabbitmq:3-management
PublishPort=15672:15672
PublishPort=5672:5672
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,108 @@
# Podman rabbitmq
- [Podman rabbitmq](#podman-rabbitmq)
- [Setup rabbitmq Project](#setup-rabbitmq-project)
- [Install rabbitmq](#install-rabbitmq)
- [Expose rabbitmq](#expose-rabbitmq)
- [firewalld](#firewalld)
- [Backup rabbitmq](#backup-rabbitmq)
- [Upgrade rabbitmq](#upgrade-rabbitmq)
- [Upgrade Quadlets](#upgrade-quadlets)
- [Uninstall](#uninstall)
- [Notes](#notes)
- [SELinux](#selinux)
## Setup rabbitmq Project
- [x] Copy and rename this folder to active/container_foobar
- [x] Find and replace rabbitmq with the name of the service.
- [ ] Create the rootless user to run the podman containers
- [ ] Write the compose.yaml spec for your service
- [ ] Convert the compose.yaml spec to a quadlet
- [ ] Install the quadlet on the podman server
- [ ] Expose the quadlet service
- [ ] Install a backup service and timer
## Install rabbitmq
<https://hub.docker.com/_/rabbitmq/>
```bash
ssh rabbitmq mkdir /srv/rabbitmq
scp active/container_rabbitmq/example.env rabbitmq:/srv/rabbitmq/rabbitmq.env
scp active/container_rabbitmq/rabbitmq-compose.yaml rabbitmq:
ssh rabbitmq docker compose -f rabbitmq-compose.yaml up
```
List queues
```bash
docker exec -it rabbitmq rabbitmqctl list_queues
```
### Expose rabbitmq
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
3. Finally, follow your OS's guide for opening ports via its firewall service.
#### firewalld
```bash
# command to get current active zone and default zone
firewall-cmd --get-active-zones
firewall-cmd --get-default-zone
# command to open 443 on tcp
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
# command to open 80 and 443 on tcp and udp
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
# command to list available services and then open http and https
firewall-cmd --get-services
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
```
## Backup rabbitmq
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
## Upgrade rabbitmq
### Upgrade Quadlets
Upgrades should be a repeat of [writing the compose spec](#convert-rabbitmq-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
```bash
export PODMAN_SERVER=
scp -r quadlets/. $PODMAN_SERVER:/home/rabbitmq/.config/containers/systemd/
ssh rabbitmq systemctl --user daemon-reload
ssh rabbitmq systemctl --user restart rabbitmq
```
## Uninstall
```bash
# Stop the user's services
systemctl --user disable podman-restart
podman container stop --all
systemctl --user disable --now podman.socket
systemctl --user disable --now podman-auto-update.timer
# Delete the user (this won't delete their home directory)
# userdel might spit out an error like:
# userdel: user rabbitmq is currently used by process 591255
# kill those processes and try again
userdel rabbitmq
```
## Notes
### SELinux
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
:z allows a container to share a mounted volume with all other containers.
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.

View File

@@ -102,7 +102,6 @@ qemu-img convert -f qcow2 -O raw \
# Install (Change password for default user ducoterra!)
virt-install \
--name "gitlab" \
--boot uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no \
--cpu host-passthrough --vcpus sockets=1,cores=8,threads=2 \
--ram=8192 \
@@ -110,7 +109,12 @@ virt-install \
--network bridge:bridge0 \
--graphics none \
--console pty,target.type=virtio \
--import --disk "path=/srv/vm/pool1/fedora-boot.raw,bus=virtio"
--name "fedora" \
--import --disk "path=/srv/vm/pool1/fedora-boot.raw,bus=virtio" \
# If you need to pass through a PCIe card
--hostdev pci_0000_4e_00_0 \
--hostdev pci_0000_4e_00_1
# convert a cloud-init image to raw
qemu-img convert -f qcow2 -O raw \

View File

@@ -6,36 +6,7 @@
- [Notes](#notes)
- [Firmware and Kernel](#firmware-and-kernel)
- [Kernel args](#kernel-args)
- [Volume Locations](#volume-locations)
- [Setup](#setup)
- [Create the AI user](#create-the-ai-user)
- [Helper aliases](#helper-aliases)
- [Create the models dir](#create-the-models-dir)
- [Install the Hugging Face CLI](#install-the-hugging-face-cli)
- [Samba Model Storage](#samba-model-storage)
- [Download models](#download-models)
- [Text models](#text-models)
- [GPT-OSS](#gpt-oss)
- [Mistral](#mistral)
- [Nemotron](#nemotron)
- [Qwen](#qwen)
- [GLM](#glm)
- [Llama](#llama)
- [Gemma](#gemma)
- [Dolphin (Abliterated)](#dolphin-abliterated)
- [Image models](#image-models)
- [Z-Image](#z-image)
- [Flux](#flux)
- [Qwen Image 2512](#qwen-image-2512)
- [Embedding Models](#embedding-models)
- [Nomic](#nomic)
- [llama.cpp](#llamacpp)
- [stable-diffusion.cpp](#stable-diffusioncpp)
- [open-webui](#open-webui)
- [VLLM](#vllm)
- [Install the whole thing with quadlets (TM)](#install-the-whole-thing-with-quadlets-tm)
- [Install the update script](#install-the-update-script)
- [Install Guest Open Webui with Start/Stop Services](#install-guest-open-webui-with-startstop-services)
- [AI](#ai)
## BIOS
@@ -65,517 +36,6 @@ amd_iommu=off amdgpu.gttsize=126976 ttm.pages_limit=32505856
Then `grub2-mkconfig -o /boot/grub2/grub.cfg` and `reboot`.
### Volume Locations
## AI
`~/.local/share/containers/storage/volumes/`
## Setup
### Create the AI user
```bash
# Create your local ai user. This will be the user you launch podman processes from.
useradd -m ai
loginctl enable-linger ai
su -l ai
mkdir -p /home/ai/.config/containers/systemd/
mkdir -p /home/ai/.ssh
```
Models are big. You'll want some tools to help find large files quickly when space runs out.
### Helper aliases
Add these to your .bashrc:
```bash
# Calculate all folder sizes in current dir
alias {dudir,dud}='du -h --max-depth 1 | sort -h'
# Calculate all file sizes in current dir
alias {dufile,duf}='ls -lhSr'
# Restart llama-server / follow logs
alias llama-reload="systemctl --user daemon-reload && systemctl --user restart llama-server.service"
alias llama-logs="journalctl --user -fu llama-server"
# Restart stable diffusion gen and edit server / follow logs
alias sd-gen-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-gen-server'
alias sd-gen-logs='journalctl --user -xeu stable-diffusion-gen-server'
alias sd-edit-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-edit-server'
alias sd-edit-logs='journalctl --user -xeu stable-diffusion-edit-server'
```
### Create the models dir
```bash
mkdir -p /home/ai/models/{text,image,video,embedding,tts,stt}
```
### Install the Hugging Face CLI
<https://huggingface.co/docs/huggingface_hub/en/guides/cli#getting-started>
```bash
# Install
curl -LsSf https://hf.co/cli/install.sh | bash
# Login
hf auth login
```
### Samba Model Storage
I recommend adding network storage for keeping models offloaded. This mounts a samba share at `/srv/models`.
```bash
# Add this to /etc/fstab
//driveripper.reeselink.com/smb_models /srv/models cifs _netdev,nofail,uid=1001,gid=1001,credentials=/etc/samba/credentials 0 0
# Then mount
systemctl daemon-reload
mount -a --mkdir
```
Here are some sync commands that I use to keep the samba share in sync with the home directory:
```bash
# Sync models from home dir to the samba share
rsync -av --progress /home/ai/models/ /srv/models/
```
### Download models
#### Text models
<https://huggingface.co/ggml-org/collections>
##### GPT-OSS
```bash
# gpt-oss-120b
mkdir /home/ai/models/text/gpt-oss-120b
hf download --local-dir /home/ai/models/text/gpt-oss-120b ggml-org/gpt-oss-120b-GGUF
# gpt-oss-20b
mkdir /home/ai/models/text/gpt-oss-20b
hf download --local-dir /home/ai/models/text/gpt-oss-20b ggml-org/gpt-oss-20b-GGUF
```
##### Mistral
```bash
# devstral-2-123b
mkdir /home/ai/models/text/devstral-2-123b
hf download --local-dir /home/ai/models/text/devstral-2-123b unsloth/Devstral-2-123B-Instruct-2512-GGUF Q4_K_M/Devstral-2-123B-Instruct-2512-Q4_K_M-00001-of-00002.gguf
hf download --local-dir /home/ai/models/text/devstral-2-123b unsloth/Devstral-2-123B-Instruct-2512-GGUF Q4_K_M/Devstral-2-123B-Instruct-2512-Q4_K_M-00002-of-00002.gguf
# devstral-small-2-24b
mkdir /home/ai/models/text/devstral-small-2-24b
hf download --local-dir /home/ai/models/text/devstral-small-2-24b unsloth/Devstral-Small-2-24B-Instruct-2512-GGUF Devstral-Small-2-24B-Instruct-2512-Q4_K_M.gguf
# ministral-3-14b
mkdir /home/ai/models/text/ministral-3-14b
hf download --local-dir /home/ai/models/text/ministral-3-14b ggml-org/Ministral-3-14B-Reasoning-2512-GGUF
# ministral-3-3b-instruct
mkdir /home/ai/models/text/ministral-3-3b-instruct
hf download --local-dir /home/ai/models/text/ministral-3-3b-instruct ggml-org/Ministral-3-3B-Instruct-2512-GGUF
```
##### Nemotron
```bash
# nemotron-nano-30b
mkdir /home/ai/models/text/nemotron-nano-30b
hf download --local-dir /home/ai/models/text/nemotron-nano-30b ggml-org/Nemotron-Nano-3-30B-A3B-GGUF Nemotron-Nano-3-30B-A3B-Q4_K_M.gguf
```
##### Qwen
```bash
# qwen3-30b-a3b-thinking
mkdir /home/ai/models/text/qwen3-30b-a3b-thinking
hf download --local-dir /home/ai/models/text/qwen3-30b-a3b-thinking ggml-org/Qwen3-30B-A3B-GGUF Qwen3-30B-A3B-Q4_K_M.gguf
# qwen3-30b-a3b-instruct
mkdir /home/ai/models/text/qwen3-30b-a3b-instruct
hf download --local-dir /home/ai/models/text/qwen3-30b-a3b-instruct ggml-org/Qwen3-30B-A3B-Instruct-2507-Q8_0-GGUF
# qwen3-coder-30b-a3b-instruct
mkdir /home/ai/models/text/qwen3-coder-30b-a3b-instruct
hf download --local-dir /home/ai/models/text/qwen3-coder-30b-a3b-instruct ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF
# qwen3-coder-next
mkdir /home/ai/models/text/qwen3-coder-next
hf download --local-dir /home/ai/models/text/qwen3-coder-next unsloth/Qwen3-Coder-Next-GGUF --include "Q5_K_M/*.gguf"
# qwen3-vl-30b-thinking
mkdir /home/ai/models/text/qwen3-vl-30b-thinking
hf download --local-dir /home/ai/models/text/qwen3-vl-30b-thinking unsloth/Qwen3-VL-30B-A3B-Thinking-1M-GGUF Qwen3-VL-30B-A3B-Thinking-1M-Q4_K_M.gguf
hf download --local-dir /home/ai/models/text/qwen3-vl-30b-thinking unsloth/Qwen3-VL-30B-A3B-Thinking-1M-GGUF mmproj-F16.gguf
# qwen3-vl-8b-instruct
mkdir /home/ai/models/text/qwen3-vl-8b-instruct
hf download --local-dir /home/ai/models/text/qwen3-vl-8b-instruct Qwen/Qwen3-VL-8B-Instruct-GGUF Qwen3VL-8B-Instruct-Q4_K_M.gguf
hf download --local-dir /home/ai/models/text/qwen3-vl-8b-instruct Qwen/Qwen3-VL-8B-Instruct-GGUF mmproj-Qwen3VL-8B-Instruct-Q8_0.gguf
# qwen3-4b-2507-abliterated
mkdir /home/ai/models/text/qwen3-4b-2507-abliterated
hf download --local-dir /home/ai/models/text/qwen3-4b-2507-abliterated prithivMLmods/Qwen3-4B-2507-abliterated-GGUF Qwen3-4B-Thinking-2507-abliterated-GGUF/Qwen3-4B-Thinking-2507-abliterated.Q4_K_M.gguf
# qwen3-48b-a4b-abliterated
mkdir /home/ai/models/text/qwen3-48b-a4b-abliterated
hf download --local-dir /home/ai/models/text/qwen3-48b-a4b-abliterated DavidAU/Qwen3-48B-A4B-Savant-Commander-Distill-12X-Closed-Open-Heretic-Uncensored-GGUF Qwen3-48B-A4B-Savant-Commander-Dstll-12X-Cl-Op-Hrtic-Uncen-Q4_K_M.gguf
```
##### GLM
```bash
# glm-4.7-flash-30b
mkdir /home/ai/models/text/glm-4.7-flash-30b
hf download --local-dir /home/ai/models/text/glm-4.7-flash-30b unsloth/GLM-4.7-Flash-GGUF GLM-4.7-Flash-Q4_K_M.gguf
# glm-4.6v
mkdir /home/ai/models/text/glm-4.6v
hf download --local-dir /home/ai/models/text/glm-4.6v unsloth/GLM-4.6V-GGUF --include "Q4_K_M/*.gguf"
hf download --local-dir /home/ai/models/text/glm-4.6v unsloth/GLM-4.6V-GGUF mmproj-F16.gguf
# glm-4.6v-flash
mkdir /home/ai/models/text/glm-4.6v-flash
hf download --local-dir /home/ai/models/text/glm-4.6v-flash unsloth/GLM-4.6V-Flash-GGUF GLM-4.6V-Flash-Q4_K_M.gguf
hf download --local-dir /home/ai/models/text/glm-4.6v-flash unsloth/GLM-4.6V-Flash-GGUF mmproj-F16.gguf
```
##### Llama
```bash
# llama4-scout
mkdir /home/ai/models/text/llama4-scout
# Remember to move the gguf files into the llama4-scout folder, otherwise it won't pick up
hf download --local-dir /home/ai/models/text/llama4-scout unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF --include "Q4_K_M/*.gguf"
hf download --local-dir /home/ai/models/text/llama4-scout unsloth/Llama-4-Scout-17B-16E-Instruct-GGUF mmproj-F16.gguf
```
##### Gemma
```bash
# Note "it" vs "pt" suffixes. "it" is instruction following, "pt" is the base model (not as good for out-of-the-box use)
# gemma-3-27b-it
mkdir /home/ai/models/text/gemma-3-27b-it
hf download --local-dir /home/ai/models/text/gemma-3-27b-it unsloth/gemma-3-27b-it-GGUF gemma-3-27b-it-Q4_K_M.gguf
hf download --local-dir /home/ai/models/text/gemma-3-27b-it unsloth/gemma-3-27b-it-GGUF mmproj-F16.gguf
```
##### Dolphin (Abliterated)
```bash
# dolphin-x1-8b
mkdir /home/ai/models/text/dolphin-x1-8b
hf download --local-dir /home/ai/models/text/dolphin-x1-8b dphn/Dolphin-X1-8B-GGUF Dolphin-X1-8B-Q4_K_M.gguf
# dolphin-mistral-24b-venice
mkdir /home/ai/models/text/dolphin-mistral-24b-venice
hf download --local-dir /home/ai/models/text/dolphin-mistral-24b-venice bartowski/cognitivecomputations_Dolphin-Mistral-24B-Venice-Edition-GGUF cognitivecomputations_Dolphin-Mistral-24B-Venice-Edition-Q4_K_M.gguf
```
#### Image models
##### Z-Image
```bash
# z-turbo
# Fastest image generation in 8 steps. Great at text and prompt following.
# Lacks variety.
mkdir /home/ai/models/image/z-turbo
hf download --local-dir /home/ai/models/image/z-turbo leejet/Z-Image-Turbo-GGUF z_image_turbo-Q4_K.gguf
hf download --local-dir /home/ai/models/image/z-turbo black-forest-labs/FLUX.1-schnell ae.safetensors
hf download --local-dir /home/ai/models/image/z-turbo unsloth/Qwen3-4B-Instruct-2507-GGUF Qwen3-4B-Instruct-2507-Q4_K_M.gguf
# z-image
# Full version of z-turbo. Needs 28-50 steps.
# Note, image quality not as good as z-turbo
mkdir /home/ai/models/image/z-image
hf download --local-dir /home/ai/models/image/z-image unsloth/Z-Image-GGUF z-image-Q4_K_M.gguf
hf download --local-dir /home/ai/models/image/z-image black-forest-labs/FLUX.1-schnell ae.safetensors
hf download --local-dir /home/ai/models/image/z-image unsloth/Qwen3-4B-Instruct-2507-GGUF Qwen3-4B-Instruct-2507-Q4_K_M.gguf
```
##### Flux
```bash
# flux2-klein
# Capable of generating images in 4 steps
mkdir /home/ai/models/image/flux2-klein
hf download --local-dir /home/ai/models/image/flux2-klein leejet/FLUX.2-klein-9B-GGUF flux-2-klein-9b-Q4_0.gguf
hf download --local-dir /home/ai/models/image/flux2-klein black-forest-labs/FLUX.2-dev ae.safetensors
hf download --local-dir /home/ai/models/image/flux2-klein unsloth/Qwen3-8B-GGUF Qwen3-8B-Q4_K_M.gguf
# flux-1-kontext
mkdir /home/ai/models/image/flux-1-kontext
hf download --local-dir /home/ai/models/image/flux-1-kontext QuantStack/FLUX.1-Kontext-dev-GGUF flux1-kontext-dev-Q4_K_M.gguf
hf download --local-dir /home/ai/models/image/flux-1-kontext black-forest-labs/FLUX.1-dev ae.safetensors
hf download --local-dir /home/ai/models/image/flux-1-kontext comfyanonymous/flux_text_encoders clip_l.safetensors
hf download --local-dir /home/ai/models/image/flux-1-kontext comfyanonymous/flux_text_encoders t5xxl_fp16.safetensors
```
##### Qwen Image 2512
```bash
```
#### Embedding Models
##### Nomic
```bash
# nomic-embed-text-v2
mkdir /home/ai/models/embedding/nomic-embed-text-v2
hf download --local-dir /home/ai/models/embedding/nomic-embed-text-v2 ggml-org/Nomic-Embed-Text-V2-GGUF
```
## llama.cpp
<https://github.com/ggml-org/llama.cpp/tree/master/tools/server>
```bash
# Build the llama.cpp container image
git clone https://github.com/ggml-org/llama.cpp.git
cd llama.cpp
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")
# Vulkan
podman build -f .devops/vulkan.Dockerfile -t llama-cpp-vulkan:${BUILD_TAG} -t llama-cpp-vulkan:latest .
# ROCM
podman build -f .devops/rocm.Dockerfile -t llama-cpp-rocm:${BUILD_TAG} -t llama-cpp-rocm:latest .
# Run llama server (Available on port 8000)
# Add `--n-cpu-moe 32` to gpt-oss-120b to keep a minimal number of experts in the GPU
podman run \
--rm \
--name llama-server-demo \
--device=/dev/kfd \
--device=/dev/dri \
--pod systemd-ai-internal \
-v /home/ai/models/text:/models:z \
localhost/llama-cpp-vulkan:latest \
--port 8000 \
-c 32000 \
--perf \
--n-gpu-layers all \
--jinja \
--models-max 1 \
--models-dir /models
# ROCM
podman run \
--rm \
--name llama-server-demo \
--device=/dev/kfd \
--device=/dev/dri \
--pod systemd-ai-internal \
-v /home/ai/models/text:/models:z \
localhost/llama-cpp-rocm:latest \
--port 8000 \
-c 0 \
--perf \
--n-gpu-layers all \
--jinja \
--models-max 1 \
--models-dir /models
```
## stable-diffusion.cpp
Server: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/server>
CLI: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/cli>
```bash
git clone https://github.com/leejet/stable-diffusion.cpp.git
cd stable-diffusion.cpp
git submodule update --init --recursive
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")
# Vulkan
podman build -f Dockerfile.vulkan -t stable-diffusion-cpp:${BUILD_TAG} -t stable-diffusion-cpp:latest .
```
```bash
# z-turbo
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
--vae /models/image/z-turbo/ae.safetensors \
--llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
--cfg-scale 1.0 \
-v \
--seed -1 \
--steps 8 \
--vae-conv-direct \
-H 1024 \
-W 1024 \
-o /output/output.png \
-p "A photorealistic dragon"
# z-image
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/z-image/z-image-Q4_K_M.gguf \
--vae /models/image/z-image/ae.safetensors \
--llm /models/image/z-image/Qwen3-4B-Instruct-2507-Q4_K_M.gguf \
--cfg-scale 1.0 \
-v \
--seed -1 \
--steps 28 \
--vae-conv-direct \
-H 1024 \
-W 1024 \
-o /output/output.png \
-p "A photorealistic dragon"
# flux2-klein
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q4_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q4_K_M.gguf \
--cfg-scale 1.0 \
--steps 4 \
-v \
--seed -1 \
--vae-conv-direct \
-H 1024 \
-W 1024 \
-o /output/output.png \
-p "A photorealistic dragon"
# Edit with flux2 klein
.\bin\Release\sd-cli.exe \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q4_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q4_K_M.gguf \
--cfg-scale 1.0 \
--sampling-method euler \
-v \
--vae-conv-direct \
  --steps 4 \
  -r .\kontext_input.png \
  -p "change 'flux.cpp' to 'klein.cpp'"
# Edit with flux kontext
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/flux-1-kontext/flux1-kontext-dev-Q4_K_M.gguf \
--vae /models/image/flux-1-kontext/ae.safetensors \
--clip_l /models/image/flux-1-kontext/clip_l.safetensors \
--t5xxl /models/image/flux-1-kontext/t5xxl_fp16.safetensors \
--cfg-scale 1.0 \
--sampling-method euler \
--seed -1 \
--steps 28 \
--vae-conv-direct \
-v \
-H 512 \
-W 512 \
-o /output/output.png \
-r /output/everquest_logo.png \
-p "Add the text 'EverQuest'"
```
## open-webui
```bash
mkdir /home/ai/.env
# Create a file called open-webui-env with `WEBUI_SECRET_KEY="some-random-key"`
scp active/device_framework_desktop/secrets/open-webui-env deskwork-ai:.env/
# Will be available on port 8080
podman run \
-d \
--pod ai \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
ghcr.io/open-webui/open-webui:main
```
Use the following connections:
| Service | Endpoint |
| -------------------- | ----------------------------------------- |
| llama.cpp | <http://host.containers.internal:8000> |
| stable-diffusion.cpp | <http://host.containers.internal:1234/v1> |
## VLLM
```bash
--group-add=video \
--cap-add=SYS_PTRACE \
--security-opt seccomp=unconfined \
--env "HF_TOKEN=$HF_TOKEN" \
--ipc=host \
mkdir -p /home/ai/vllm/.cache/huggingface
podman run --rm \
--device /dev/kfd \
--device /dev/dri \
-v /home/ai/vllm/.cache/huggingface:/root/.cache/huggingface:z \
-p 8002:8000 \
docker.io/vllm/vllm-openai-rocm:latest \
--model Qwen/Qwen3-0.6B
```
## Install the whole thing with quadlets (TM)
```bash
# Installs and runs all services in `quadlets/`
scp -r active/device_framework_desktop/quadlets/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
```
Note, all services will be available at `host.containers.internal`. So llama.cpp
will be up at `http://host.containers.internal:8000`.
### Install the update script
```bash
# 1. Builds the latest llama.cpp and stable-diffusion.cpp
# 2. Pulls the latest open-webui
# 3. Restarts all services
scp active/device_framework_desktop/update-script.sh deskwork-ai:
ssh deskwork-ai
chmod +x update-script.sh
./update-script.sh
```
## Install Guest Open Webui with Start/Stop Services
```bash
scp -r active/device_framework_desktop/systemd/. deskwork-ai:.config/systemd/user/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user enable open-webui-guest-start.timer
systemctl --user enable open-webui-guest-stop.timer
```
See [Self Hosted AI Stack](/active/software_ai_stack/ai_stack.md)

View File

@@ -854,11 +854,12 @@ sudo dnf install -y koji
# Search for the desired kernel version
koji search build kernel-6.18.3*
export KERNEL_VERSION=6.18.12
# Create a temporary directory to store the downloaded kernel packages
sudo -i
mkdir /root/kernel-download-6.18.3
cd /root/kernel-download-6.18.3
mkdir /tmp/kernel-download-${KERNEL_VERSION}
cd /tmp/kernel-download-${KERNEL_VERSION}
# Download the kernel packages
koji download-build --arch=x86_64 kernel-6.18.3-200.fc43

View File

@@ -0,0 +1,10 @@
# Fedora
## Kernel Rescue
1. Check that `/boot` and `/boot/efi` aren't full
2. `mkdir -p /boot/efi/loader/entries`
3. `mkdir -p /boot/efi/$(cat /etc/machine-id)`
4. Check for other missing directories and create as needed
5. `dracut -f --regenerate-all` to regenerate missing kernels
6. `dnf reinstall kernel*` to rerun kernel installation scripts

View File

@@ -0,0 +1,499 @@
# Self Hosted AI Stack
- [Self Hosted AI Stack](#self-hosted-ai-stack)
- [Notes](#notes)
- [Podman Volume Locations](#podman-volume-locations)
- [Setup](#setup)
- [Create the AI user](#create-the-ai-user)
- [Helper aliases](#helper-aliases)
- [Create the models dir](#create-the-models-dir)
- [Install the Hugging Face CLI](#install-the-hugging-face-cli)
- [Samba Model Storage](#samba-model-storage)
- [Download models](#download-models)
- [Text models](#text-models)
- [GPT-OSS](#gpt-oss)
- [Mistral](#mistral)
- [Qwen](#qwen)
- [GLM](#glm)
- [Gemma](#gemma)
- [Dolphin](#dolphin)
- [Image models](#image-models)
- [Z-Image](#z-image)
- [Flux](#flux)
- [Embedding Models](#embedding-models)
- [Nomic](#nomic)
- [llama.cpp](#llamacpp)
- [stable-diffusion.cpp](#stable-diffusioncpp)
- [open-webui](#open-webui)
- [Install Services with Quadlets](#install-services-with-quadlets)
- [Internal and External Pods](#internal-and-external-pods)
- [Llama CPP Server](#llama-cpp-server)
- [Stable Diffusion CPP](#stable-diffusion-cpp)
- [Open Webui](#open-webui-1)
- [Install the update script](#install-the-update-script)
- [Install Guest Open Webui with Start/Stop Services](#install-guest-open-webui-with-startstop-services)
- [Benchmark Results](#benchmark-results)
## Notes
### Podman Volume Locations
`~/.local/share/containers/storage/volumes/`
## Setup
### Create the AI user
```bash
# Create your local ai user. This will be the user you launch podman processes from.
useradd -m ai
loginctl enable-linger ai
su -l ai
mkdir -p /home/ai/.config/containers/systemd/
mkdir -p /home/ai/.ssh
```
Models are big. You'll want some tools to help find large files quickly when space runs out.
### Helper aliases
Add these to your .bashrc:
```bash
# Calculate all folder sizes in current dir
alias {dudir,dud}='du -h --max-depth 1 | sort -h'
# Calculate all file sizes in current dir
alias {dufile,duf}='ls -lhSr'
# Restart llama-server / follow logs
alias llama-reload="systemctl --user daemon-reload && systemctl --user restart llama-server.service"
alias llama-logs="journalctl --user -fu llama-server"
# Restart stable diffusion gen and edit server / follow logs
alias sd-gen-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-gen-server'
alias sd-gen-logs='journalctl --user -xeu stable-diffusion-gen-server'
alias sd-edit-reload='systemctl --user daemon-reload && systemctl --user restart stable-diffusion-edit-server'
alias sd-edit-logs='journalctl --user -xeu stable-diffusion-edit-server'
```
### Create the models dir
```bash
mkdir -p /home/ai/models/{text,image,video,embedding,tts,stt}
```
### Install the Hugging Face CLI
<https://huggingface.co/docs/huggingface_hub/en/guides/cli#getting-started>
```bash
# Install
curl -LsSf https://hf.co/cli/install.sh | bash
# Login
hf auth login
```
### Samba Model Storage
I recommend adding network storage for keeping models offloaded. This mounts a samba share at `/srv/models`.
```bash
dnf install -y cifs-utils
# Add this to /etc/fstab
//driveripper.reeselink.com/smb_models /srv/models cifs _netdev,nofail,uid=1001,gid=1001,credentials=/etc/samba/credentials 0 0
# Then mount
systemctl daemon-reload
mount -a --mkdir
```
Here are some sync commands that I use to keep the samba share in sync with the home directory:
```bash
# Sync models from home dir to the samba share
rsync -av --progress /home/ai/models/ /srv/models/
```
### Download models
In general I try to run models quantized to at least 8 bits.
#### Text models
<https://huggingface.co/ggml-org/collections>
##### GPT-OSS
<https://unsloth.ai/docs/models/gpt-oss-how-to-run-and-fine-tune#recommended-settings>
```bash
# gpt-oss-120b
mkdir gpt-oss-120b && cd gpt-oss-120b
hf download --local-dir . ggml-org/gpt-oss-120b-GGUF
# gpt-oss-20b
mkdir gpt-oss-20b && cd gpt-oss-20b
hf download --local-dir . ggml-org/gpt-oss-20b-GGUF
```
##### Mistral
```bash
# devstral-small-2-24b
mkdir devstral-small-2-24b && cd devstral-small-2-24b
hf download --local-dir . ggml-org/Devstral-Small-2-24B-Instruct-2512-GGUF Devstral-Small-2-24B-Instruct-2512-Q8_0.gguf
# ministral-3-14b
mkdir ministral-3-14b && cd ministral-3-14b
hf download --local-dir . ggml-org/Ministral-3-14B-Reasoning-2512-GGUF
# ministral-3-3b-instruct
mkdir ministral-3-3b-instruct && cd ministral-3-3b-instruct
hf download --local-dir . ggml-org/Ministral-3-3B-Instruct-2512-GGUF
```
##### Qwen
```bash
# qwen3-30b-a3b-thinking
mkdir qwen3-30b-a3b-thinking && cd qwen3-30b-a3b-thinking
hf download --local-dir . ggml-org/Qwen3-30B-A3B-Thinking-2507-Q8_0-GGUF
# qwen3-30b-a3b-instruct
mkdir qwen3-30b-a3b-instruct && cd qwen3-30b-a3b-instruct
hf download --local-dir . ggml-org/Qwen3-30B-A3B-Instruct-2507-Q8_0-GGUF
# qwen3-vl-30b-a3b-thinking
mkdir qwen3-vl-30b-a3b-thinking && cd qwen3-vl-30b-a3b-thinking
hf download --local-dir . Qwen/Qwen3-VL-30B-A3B-Thinking-GGUF Qwen3VL-30B-A3B-Thinking-Q8_0.gguf
hf download --local-dir . Qwen/Qwen3-VL-30B-A3B-Thinking-GGUF mmproj-Qwen3VL-30B-A3B-Thinking-F16.gguf
# qwen3-vl-30b-a3b-instruct
mkdir qwen3-vl-30b-a3b-instruct && cd qwen3-vl-30b-a3b-instruct
hf download --local-dir . Qwen/Qwen3-VL-30B-A3B-Instruct-GGUF Qwen3VL-30B-A3B-Instruct-Q8_0.gguf
hf download --local-dir . Qwen/Qwen3-VL-30B-A3B-Instruct-GGUF mmproj-Qwen3VL-30B-A3B-Instruct-F16.gguf
# qwen3-coder-30b-a3b-instruct
mkdir qwen3-coder-30b-a3b-instruct && cd qwen3-coder-30b-a3b-instruct
hf download --local-dir . ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF
# qwen3-coder-next
mkdir qwen3-coder-next && cd qwen3-coder-next
hf download --local-dir . unsloth/Qwen3-Coder-Next-GGUF --include "Q8_0/*.gguf"
```
##### GLM
```bash
# glm-4.7-flash-30b
mkdir glm-4.7-flash-30b && cd glm-4.7-flash-30b
hf download --local-dir . unsloth/GLM-4.7-Flash-GGUF GLM-4.7-Flash-Q8_0.gguf
```
##### Gemma
```bash
# Note "it" vs "pt" suffixes. "it" is instruction following, "pt" is the base model (not as good for out-of-the-box use)
# gemma-3-27b-it
mkdir gemma-3-27b-it && cd gemma-3-27b-it
hf download --local-dir . unsloth/gemma-3-27b-it-GGUF gemma-3-27b-it-Q8_0.gguf
hf download --local-dir . unsloth/gemma-3-27b-it-GGUF mmproj-F16.gguf
```
##### Dolphin
```bash
# dolphin-mistral-24b-venice
mkdir dolphin-mistral-24b-venice && cd dolphin-mistral-24b-venice
hf download --local-dir . bartowski/cognitivecomputations_Dolphin-Mistral-24B-Venice-Edition-GGUF cognitivecomputations_Dolphin-Mistral-24B-Venice-Edition-Q8_0.gguf
```
#### Image models
##### Z-Image
```bash
# z-turbo
# Fastest image generation in 8 steps. Great at text and prompt following.
# Lacks variety.
mkdir /home/ai/models/image/z-turbo && cd /home/ai/models/image/z-turbo
hf download --local-dir . leejet/Z-Image-Turbo-GGUF z_image_turbo-Q8_0.gguf
hf download --local-dir . black-forest-labs/FLUX.1-schnell ae.safetensors
hf download --local-dir . unsloth/Qwen3-4B-Instruct-2507-GGUF Qwen3-4B-Instruct-2507-Q8_0.gguf
```
##### Flux
```bash
# flux2-klein
# Capable of editing images in 4 steps (though I recommend 5 steps)
mkdir /home/ai/models/image/flux2-klein && cd /home/ai/models/image/flux2-klein
hf download --local-dir . leejet/FLUX.2-klein-9B-GGUF flux-2-klein-9b-Q8_0.gguf
hf download --local-dir . black-forest-labs/FLUX.2-dev ae.safetensors
hf download --local-dir . unsloth/Qwen3-8B-GGUF Qwen3-8B-Q8_0.gguf
```
#### Embedding Models
##### Nomic
```bash
# nomic-embed-text-v2
mkdir /home/ai/models/embedding/nomic-embed-text-v2
hf download --local-dir /home/ai/models/embedding/nomic-embed-text-v2 ggml-org/Nomic-Embed-Text-V2-GGUF
```
## llama.cpp
<https://github.com/ggml-org/llama.cpp/tree/master/tools/server>
```bash
# Build the llama.cpp container image
git clone https://github.com/ggml-org/llama.cpp.git
cd llama.cpp
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")
# Vulkan (better performance as of Feb 2026)
podman build -f .devops/vulkan.Dockerfile -t llama-cpp-vulkan:${BUILD_TAG} -t llama-cpp-vulkan:latest .
# ROCM
podman build -f .devops/rocm.Dockerfile -t llama-cpp-rocm:${BUILD_TAG} -t llama-cpp-rocm:latest .
# Run llama demo server (Available on port 8000)
podman run \
--rm \
--name llama-server-demo \
--device=/dev/kfd \
--device=/dev/dri \
-v /home/ai/models/text:/models:z \
-p 8000:8000 \
localhost/llama-cpp-vulkan:latest \
--host 0.0.0.0 \
--port 8000 \
-c 32768 \
--perf \
--n-gpu-layers all \
--jinja \
--models-max 1 \
--models-dir /models
```
## stable-diffusion.cpp
Server: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/server>
CLI: <https://github.com/leejet/stable-diffusion.cpp/tree/master/examples/cli>
```bash
git clone https://github.com/leejet/stable-diffusion.cpp.git
cd stable-diffusion.cpp
git submodule update --init --recursive
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")
# Vulkan
podman build -f Dockerfile.vulkan -t stable-diffusion-cpp:${BUILD_TAG} -t stable-diffusion-cpp:latest .
```
```bash
# Generate an image with z-turbo
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q8_0.gguf \
--vae /models/image/z-turbo/ae.safetensors \
--llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q8_0.gguf \
-v \
--cfg-scale 1.0 \
--vae-conv-direct \
--diffusion-conv-direct \
--fa \
--mmap \
--seed -1 \
--steps 8 \
-H 1024 \
-W 1024 \
-o /output/output.png \
-p "A photorealistic dragon"
# Edit the generated image with flux2-klein
podman run --rm \
-v /home/ai/models:/models:z \
-v /home/ai/output:/output:z \
--device /dev/kfd \
--device /dev/dri \
localhost/stable-diffusion-cpp:latest \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q8_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q8_0.gguf \
-v \
--cfg-scale 1.0 \
--sampling-method euler \
--vae-conv-direct \
--diffusion-conv-direct \
--fa \
--mmap \
--steps 5 \
-H 1024 \
-W 1024 \
-r /output/output.png \
-o /output/edit.png \
-p "Replace the dragon with an old car"
```
## open-webui
```bash
mkdir /home/ai/.env
# Create a file called open-webui-env with `WEBUI_SECRET_KEY="some-random-key"`
scp active/device_framework_desktop/secrets/open-webui-env deskwork-ai:.env/
# Will be available on port 8080
podman run \
-d \
-p 8080:8080 \
-v open-webui:/app/backend/data \
--name open-webui \
--restart always \
ghcr.io/open-webui/open-webui:main
```
Use the following connections:
| Service | Endpoint |
| ------------------------- | ----------------------------------------- |
| llama.cpp | <http://host.containers.internal:8000> |
| stable-diffusion.cpp | <http://host.containers.internal:1234/v1> |
| stable-diffusion.cpp edit | <http://host.containers.internal:1235/v1> |
## Install Services with Quadlets
### Internal and External Pods
These will be used to restrict internet access to our llama.cpp and
stable-diffusion.cpp services while allowing the frontend services to
communicate with those containers.
```bash
scp -r active/device_framework_desktop/quadlets_pods/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user start ai-internal-pod.service ai-external-pod.service
```
### Llama CPP Server
Installs the llama.cpp server to run our text models.
```bash
scp -r active/device_framework_desktop/quadlets_llama_server/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
```
### Stable Diffusion CPP
Installs the stable-diffusion.cpp server to run our image models.
```bash
scp -r active/device_framework_desktop/quadlets_stable_diffusion/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-internal-pod.service
```
### Open Webui
Installs the open webui frontend.
```bash
scp -r active/device_framework_desktop/quadlets_openwebui/* deskwork-ai:.config/containers/systemd/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user restart ai-external-pod.service
```
Note, all services will be available at `host.containers.internal`. So llama.cpp
will be up at `http://host.containers.internal:8000`.
### Install the update script
```bash
# 1. Builds the latest llama.cpp and stable-diffusion.cpp
# 2. Pulls the latest open-webui
# 3. Restarts all services
scp active/device_framework_desktop/update-script.sh deskwork-ai:
ssh deskwork-ai
chmod +x update-script.sh
./update-script.sh
```
### Install Guest Open Webui with Start/Stop Services
Optionally install a guest openwebui service.
```bash
scp -r active/device_framework_desktop/systemd/. deskwork-ai:.config/systemd/user/
ssh deskwork-ai
systemctl --user daemon-reload
systemctl --user enable open-webui-guest-start.timer
systemctl --user enable open-webui-guest-stop.timer
```
## Benchmark Results
Benchmarks are run with [unsloth gpt-oss-20b Q8_0](https://huggingface.co/unsloth/gpt-oss-20b-GGUF/blob/main/gpt-oss-20b-Q8_0.gguf)
```bash
# Run the llama.cpp pod (AMD)
podman run -it --rm \
--device=/dev/kfd \
--device=/dev/dri \
-v /home/ai/models/text:/models:z \
--entrypoint /bin/bash \
ghcr.io/ggml-org/llama.cpp:full-vulkan
# Benchmark command
./llama-bench -m /models/benchmark/gpt-oss-20b-Q8_0.gguf
```
Framework Desktop
| model | size | params | backend | ngl | test | t/s |
| ---------------- | --------: | ------: | ------- | ---: | ----: | -------------: |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | Vulkan | 99 | pp512 | 1128.50 ± 7.60 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | Vulkan | 99 | tg128 | 77.94 ± 0.08 |
| model | size | params | backend | ngl | test | t/s |
| ---------------- | --------: | ------: | ------- | ---: | ----: | ------------: |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | ROCm | 99 | pp512 | 526.05 ± 7.04 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | ROCm | 99 | tg128 | 70.98 ± 0.01 |
AMD R9700
| model | size | params | backend | ngl | test | t/s |
| ---------------- | --------: | ------: | ------- | ---: | ----: | ---------------: |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | Vulkan | 99 | pp512 | 3756.79 ± 203.97 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | Vulkan | 99 | tg128 | 174.24 ± 0.32 |
NVIDIA GeForce RTX 4080 SUPER
| model | size | params | backend | ngl | test | t/s |
| ---------------- | --------: | ------: | ------- | ---: | ----: | ------------: |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | CUDA | 99 | tg128 | 193.28 ± 1.03 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | CUDA | 99 | tg256 | 193.55 ± 0.34 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | CUDA | 99 | tg512 | 187.39 ± 0.10 |
NVIDIA GeForce RTX 3090
| model | size | params | backend | ngl | test | t/s |
| ---------------- | --------: | ------: | ------- | ---: | ----: | --------------: |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | CUDA | 99 | pp512 | 4297.72 ± 35.60 |
| gpt-oss 20B Q8_0 | 11.27 GiB | 20.91 B | CUDA | 99 | tg128 | 197.73 ± 0.62 |

View File

@@ -0,0 +1,39 @@
[Unit]
Description=A Llama CPP Server running an Embedding Model
[Container]
# Shared AI internal pod
Pod=ai-internal.pod
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
# Downloaded models volume
Volume=/home/ai/models/embedding:/models:z
# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri
# Server command
Exec=--port 8001 \
--n-gpu-layers all \
--embeddings \
-m /models/nomic-embed-text-v2/nomic-embed-text-v2-moe-q8_0.gguf
# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8001/props || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill
[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target

View File

@@ -17,7 +17,7 @@ AddDevice=/dev/dri
# Server command
Exec=--port 8000 \
-c 48000 \
-c 16384 \
--perf \
--n-gpu-layers all \
--jinja \

View File

@@ -21,15 +21,16 @@ Entrypoint=/sd-server
# Server args
Exec=-l 0.0.0.0 \
--listen-port 1235 \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q4_0.gguf \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q8_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q4_K_M.gguf \
--llm /models/image/flux2-klein/Qwen3-8B-Q8_0.gguf \
-v \
--cfg-scale 1.0 \
--sampling-method euler \
--cfg-scale 1.0 \
--vae-conv-direct \
--offload-to-cpu \
--diffusion-conv-direct \
--fa \
--mmap \
--seed -1 \
--steps 5

View File

@@ -21,12 +21,15 @@ Entrypoint=/sd-server
# Server args
Exec=-l 0.0.0.0 \
--listen-port 1234 \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q8_0.gguf \
--vae /models/image/z-turbo/ae.safetensors \
--llm /models/image/z-turbo/qwen_3_4b.safetensors \
--llm /models/image/z-turbo/Qwen3-4B-Instruct-2507-Q8_0.gguf \
-v \
--cfg-scale 1.0 \
--vae-conv-direct \
--diffusion-conv-direct \
--fa \
--mmap \
--seed -1 \
--steps 8

View File

@@ -56,6 +56,10 @@ version = "*"
name = "policycoreutils-python-utils"
version = "*"
[[packages]]
name = "systemd-container"
version = "*"
[[customizations.files]]
path = "/root/.inputrc"
mode = "0644"

View File

@@ -37,7 +37,7 @@ mkdir /srv/smb/sambauser
sudo semanage fcontext --add --type "samba_share_t" "/srv/smb(/.*)?"
# Run restorecon at the root of the btrfs subvolume
sudo restorecon -R /srv
sudo restorecon -FRv /srv
```
Edit /etc/samba/smb.conf

1
keys/nic_ed25519.pub Normal file
View File

@@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDeo7Zgi2fEuhoLEucLUDCOS/n61Uphbesmz363fedLj ssh@norrath.org

View File

@@ -6,7 +6,10 @@ dependencies = [
"click==8.2.1",
"mkdocs>=1.6.1",
"openai>=2.21.0",
"pika>=1.3.2",
"pytest>=9.0.2",
"pyyaml>=6.0.3",
"requests>=2.32.5",
"tqdm>=4.67.3",
"types-pyyaml>=6.0.12.20250915",
"types-tqdm>=4.67.3.20260205",

View File

@@ -0,0 +1,3 @@
# Compose
Put your compose.yaml here.

View File

@@ -0,0 +1,3 @@
# Quadlets
Put your quadlets here.

114
uv.lock generated
View File

@@ -32,6 +32,47 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
[[package]]
name = "charset-normalizer"
version = "3.4.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" },
{ url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" },
{ url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" },
{ url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" },
{ url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" },
{ url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" },
{ url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" },
{ url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" },
{ url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" },
{ url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" },
{ url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" },
{ url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" },
{ url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" },
{ url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" },
{ url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" },
{ url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" },
{ url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" },
{ url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" },
{ url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" },
{ url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" },
{ url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" },
{ url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" },
{ url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" },
{ url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" },
{ url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" },
{ url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" },
{ url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" },
{ url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" },
{ url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" },
{ url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" },
{ url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" },
{ url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" },
{ url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
]
[[package]]
name = "click"
version = "8.2.1"
@@ -91,7 +132,10 @@ dependencies = [
{ name = "click" },
{ name = "mkdocs" },
{ name = "openai" },
{ name = "pika" },
{ name = "pytest" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "tqdm" },
{ name = "types-pyyaml" },
{ name = "types-tqdm" },
@@ -102,7 +146,10 @@ requires-dist = [
{ name = "click", specifier = "==8.2.1" },
{ name = "mkdocs", specifier = ">=1.6.1" },
{ name = "openai", specifier = ">=2.21.0" },
{ name = "pika", specifier = ">=1.3.2" },
{ name = "pytest", specifier = ">=9.0.2" },
{ name = "pyyaml", specifier = ">=6.0.3" },
{ name = "requests", specifier = ">=2.32.5" },
{ name = "tqdm", specifier = ">=4.67.3" },
{ name = "types-pyyaml", specifier = ">=6.0.12.20250915" },
{ name = "types-tqdm", specifier = ">=4.67.3.20260205" },
@@ -145,6 +192,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
]
[[package]]
name = "iniconfig"
version = "2.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]
[[package]]
name = "jinja2"
version = "3.1.6"
@@ -353,6 +409,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" },
]
[[package]]
name = "pika"
version = "1.3.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/db/db/d4102f356af18f316c67f2cead8ece307f731dd63140e2c71f170ddacf9b/pika-1.3.2.tar.gz", hash = "sha256:b2a327ddddf8570b4965b3576ac77091b850262d34ce8c1d8cb4e4146aa4145f", size = 145029, upload-time = "2023-05-05T14:25:43.368Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f9/f3/f412836ec714d36f0f4ab581b84c491e3f42c6b5b97a6c6ed1817f3c16d0/pika-1.3.2-py3-none-any.whl", hash = "sha256:0779a7c1fafd805672796085560d290213a465e4f6f76a6fb19e378d8041a14f", size = 155415, upload-time = "2023-05-05T14:25:41.484Z" },
]
[[package]]
name = "platformdirs"
version = "4.9.1"
@@ -362,6 +427,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/70/77/e8c95e95f1d4cdd88c90a96e31980df7e709e51059fac150046ad67fac63/platformdirs-4.9.1-py3-none-any.whl", hash = "sha256:61d8b967d34791c162d30d60737369cbbd77debad5b981c4bfda1842e71e0d66", size = 21307, upload-time = "2026-02-14T21:02:43.492Z" },
]
[[package]]
name = "pluggy"
version = "1.6.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
]
[[package]]
name = "pydantic"
version = "2.12.5"
@@ -430,6 +504,31 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
]
[[package]]
name = "pygments"
version = "2.19.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
]
[[package]]
name = "pytest"
version = "9.0.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "iniconfig" },
{ name = "packaging" },
{ name = "pluggy" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -490,6 +589,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/04/11/432f32f8097b03e3cd5fe57e88efb685d964e2e5178a48ed61e841f7fdce/pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", size = 4722, upload-time = "2025-05-13T15:23:59.629Z" },
]
[[package]]
name = "requests"
version = "2.32.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "charset-normalizer" },
{ name = "idna" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
]
[[package]]
name = "six"
version = "1.17.0"