# Stable Diffusion Framework Desktop Notes

## llama.cpp

```bash
git clone https://github.com/ggml-org/llama.cpp.git
cd llama.cpp
export BUILD_TAG=$(date +"%Y-%m-%d-%H-%M-%S")
docker build -t llama-cpp-vulkan:${BUILD_TAG} -f .devops/vulkan.Dockerfile .

# Run llama server with gpt-oss-120b
docker run \
-d \
--restart always \
--name=llama-server \
--network=host \
--device=/dev/kfd \
--device=/dev/dri \
-v llama-server-cache:/root/.cache \
llama-cpp-vulkan:${BUILD_TAG} \
-hf ggml-org/gpt-oss-120b-GGUF --ctx-size 0 --jinja -ub 2048 -b 2048 \
--port 8000 --host 0.0.0.0 -n -1 --n-gpu-layers 999

# Run llama server with devstral-small-2 24b
docker run \
-d \
--restart always \
--name=llama-server-devstral \
--network=host \
--device=/dev/kfd \
--device=/dev/dri \
-v llama-server-cache:/root/.cache \
llama-cpp-vulkan:${BUILD_TAG} \
-hf bartowski/mistralai_Devstral-Small-2-24B-Instruct-2512-GGUF \
--ctx-size 0 --jinja -ub 2048 -b 2048 \
--port 8001 --host 0.0.0.0 -n -1 --n-gpu-layers 999
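# Note: both servers mount the same llama-server-cache volume, so models
# fetched with -hf are downloaded once and reused across restarts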
# Firewall
firewall-cmd --add-port=8000/tcp --permanent
firewall-cmd --add-port=8001/tcp --permanent
firewall-cmd --reload
```
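Once both containers are up, a quick smoke test of the OpenAI-compatible endpoints (a minimal sketch; assumes curl on the host and the ports from the run commands above):

```bash
# Health endpoints for both servers
curl -s http://localhost:8000/health
curl -s http://localhost:8001/health
# Minimal chat completion against the gpt-oss-120b server (llama-server
# serves a single model, so no model field is required)
curl -s http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "hello"}]}'
```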
## AnythingLLM

Per [the docs](https://docs.anythingllm.com/installation-docker/cloud-docker):

> use sandboxed Chromium across all runtimes for best security practices

```bash
mkdir /etc/anything-llm
touch /etc/anything-llm/.env
chown 1000:1000 /etc/anything-llm/.env
chmod 600 /etc/anything-llm/.env
# Add JWT_SECRET=<random string> to this file
vim /etc/anything-llm/.env
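# Alternative (an assumption, not from the AnythingLLM docs): generate
# the secret non-interactively instead of editing by hand
# echo "JWT_SECRET=$(openssl rand -base64 32)" >> /etc/anything-llm/.env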
# Server will be accessible on port 3001
# Connect llama.cpp as a generic OpenAI LLM provider and use host
# http://172.17.0.1:8000/v1
docker run \
--name anythingllm \
--cap-add SYS_ADMIN \
-v anythingllm:/app/server/storage \
-v /etc/anything-llm/.env:/app/server/.env \
-e STORAGE_DIR="/app/server/storage" \
docker.io/mintplexlabs/anythingllm
# Firewall
firewall-cmd --add-port=3001/tcp --permanent
firewall-cmd --reload
```
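The provider URL above assumes Docker's default bridge. It's worth confirming that 172.17.0.1 really is the docker0 address and that the container can reach llama.cpp (a rough check; curl being present in the anythingllm image is an assumption):

```bash
# Confirm the docker0 bridge address matches the provider URL above
ip -4 addr show docker0
# Check the llama.cpp endpoint from inside the container
docker exec anythingllm curl -s http://172.17.0.1:8000/v1/models
```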
## Stable Diffusion CPP
```bash
# z-turbo
docker run --rm \
-v ./models:/models \
-v ./build:/output \
--device /dev/kfd \
--device /dev/dri \
ghcr.io/leejet/stable-diffusion.cpp:master-vulkan \
--diffusion-model /models/z_turbo/z_image_turbo_bf16.safetensors \
--vae /models/z_turbo/ae.safetensors \
--llm /models/z_turbo/qwen_3_4b.safetensors \
--cfg-scale 1.0 \
-v \
--diffusion-fa \
-H 1024 \
-W 512 \
-o /output/output.png \
--seed -1 \
-p "Framework Laptop 13"
# Flux2
docker run --rm \
-v ./models:/models \
-v ./build:/output \
--device /dev/kfd \
--device /dev/dri \
ghcr.io/leejet/stable-diffusion.cpp:master-vulkan \
--diffusion-model /models/flux2/flux2-dev-Q8_0.gguf \
--vae /models/flux2/ae.safetensors \
--llm /models/flux2/Mistral-Small-3.2-24B-Instruct-2506-Q8_0.gguf \
--cfg-scale 1.0 \
--sampling-method euler \
-v \
--diffusion-fa \
-o /output/output.png \
-p "A picture of sign that says 'framework'"
```
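Both invocations assume the model files are already downloaded into ./models; the expected layout, taken from the flags above (download sources are not covered here):

```bash
# models/z_turbo/z_image_turbo_bf16.safetensors
# models/z_turbo/ae.safetensors
# models/z_turbo/qwen_3_4b.safetensors
# models/flux2/flux2-dev-Q8_0.gguf
# models/flux2/ae.safetensors
# models/flux2/Mistral-Small-3.2-24B-Instruct-2506-Q8_0.gguf
```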