Framework desktop offline AI updates

This commit is contained in:
2026-02-06 20:11:19 -05:00
parent 7626cdf998
commit 525e14965d
12 changed files with 354 additions and 45 deletions

View File

@@ -0,0 +1,6 @@
[Pod]
# ai-external is the primary network
Network=ai-external.network
Network=ai-internal.network
# open-webui
PublishPort=8080:8080/tcp

View File

@@ -0,0 +1,3 @@
[Network]
IPv6=true
Internal=true

View File

@@ -0,0 +1,8 @@
[Pod]
Network=ai-internal.network
# llama.cpp
PublishPort=8000:8000/tcp
# stable-diffusion.cpp gen
PublishPort=1234:1234/tcp
# stable-diffusion.cpp edit
PublishPort=1235:1235/tcp

View File

@@ -1,8 +0,0 @@
[Pod]
Network=ai.network
# llama.cpp
PublishPort=8000:8000/tcp
# open-webui
PublishPort=8080:8080/tcp
# stable-diffusion.cpp
PublishPort=1234:1234/tcp

View File

@@ -2,8 +2,8 @@
Description=A Llama CPP Server Running GPT OSS 120b
[Container]
# Shared AI pod
Pod=ai.pod
# Shared AI internal pod
Pod=ai-internal.pod
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
@@ -18,8 +18,6 @@ AddDevice=/dev/dri
# Server command
Exec=--port 8000 \
-c 48000 \
-b 48000 \
-ub 500 \
--perf \
--n-gpu-layers all \
--jinja \
@@ -27,7 +25,7 @@ Exec=--port 8000 \
--models-dir /models
# Health Check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8000/props?model=gpt-oss-120b || exit 1
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8000/props || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s

View File

@@ -2,8 +2,8 @@
Description=An Open Webui Frontend for Local AI Services
[Container]
# Shared AI pod
Pod=ai.pod
# Shared AI external pod
Pod=ai-external.pod
# Open Webui base image
Image=ghcr.io/open-webui/open-webui:main

View File

@@ -2,8 +2,8 @@
Description=A Stable Diffusion CPP Server for Editing Images
[Container]
# Shared AI pod
Pod=ai.pod
# Shared AI internal pod
Pod=ai-internal.pod
# Vulkan image for AMD GPU
Image=localhost/stable-diffusion-cpp:latest
@@ -21,16 +21,14 @@ Entrypoint=/sd-server
# Server args
Exec=-l 0.0.0.0 \
--listen-port 1235 \
--diffusion-model /models/image/flux-1-kontext/flux1-kontext-dev-Q4_K_M.gguf \
--vae /models/image/flux-1-kontext/ae.safetensors \
--clip_l /models/image/flux-1-kontext/clip_l.safetensors \
--t5xxl /models/image/flux-1-kontext/t5xxl_fp16.safetensors \
--diffusion-model /models/image/flux2-klein/flux-2-klein-9b-Q4_0.gguf \
--vae /models/image/flux2-klein/ae.safetensors \
--llm /models/image/flux2-klein/Qwen3-8B-Q4_K_M.gguf \
--cfg-scale 1.0 \
--sampling-method euler \
--vae-conv-direct \
--seed -1 \
--steps 28 \
-v
-v \
--steps 4 \
--vae-conv-direct
[Service]
Restart=always

View File

@@ -2,8 +2,8 @@
Description=A Stable Diffusion CPP Server for Generating Images
[Container]
# Shared AI pod
Pod=ai.pod
# Shared AI internal pod
Pod=ai-internal.pod
# Vulkan image for AMD GPU
Image=localhost/stable-diffusion-cpp:latest
@@ -24,8 +24,6 @@ Exec=-l 0.0.0.0 \
--diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
--vae /models/image/z-turbo/ae.safetensors \
--llm /models/image/z-turbo/qwen_3_4b.safetensors \
-l 0.0.0.0 \
--listen-port 1234 \
--cfg-scale 1.0 \
--vae-conv-direct \
-v \