local ai checkpoint
active/device_framework_desktop/quadlets/ai.network (new file)
@@ -0,0 +1,2 @@
+[Network]
+IPv6=true
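
Quadlet turns ai.network into an ai-network.service unit, and since no NetworkName= is set it names the podman network systemd-ai by default. A quick sanity check of the generated units and the network (a sketch for a rootless setup; drop the user flags for rootful):

    # Preview the systemd units Quadlet would generate from these files
    /usr/libexec/podman/quadlet -dryrun -user
    # After a daemon-reload and a start of the pod, confirm the network exists
    systemctl --user daemon-reload
    podman network inspect systemd-ai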
active/device_framework_desktop/quadlets/ai.pod
@@ -1,9 +1,8 @@
 [Pod]
+Network=ai.network
 # llama.cpp
 PublishPort=8000:8000/tcp
 # open-webui
 PublishPort=8080:8080/tcp
-# anything-llm
-PublishPort=3001:3001/tcp
-# ollama
-PublishPort=11434:11434/tcp
+# stable-diffusion.cpp
+PublishPort=1234:1234/tcp
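
Publishing ports on the pod rather than on individual containers means every container joining ai.pod shares one network namespace, and only the ports listed here are reachable from the host. A minimal smoke test once the units are installed (a sketch; Quadlet generates ai-pod.service from ai.pod):

    systemctl --user start ai-pod.service
    podman pod ps                            # the pod should show as Running
    ss -tlnp | grep -E ':(8000|8080|1234)'   # published ports bound on the host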
(deleted: anything-llm container unit)
@@ -1,21 +0,0 @@
-[Unit]
-Description=An Anything LLM Frontend for Local AI Services
-
-[Container]
-Pod=ai.pod
-Image=docker.io/mintplexlabs/anythingllm
-Volume=anythingllm:/app/server/storage
-Volume=/home/ai/anything-llm/.env:/app/server/.env:z
-Environment=STORAGE_DIR=/app/server/storage
-AddCapability=SYS_ADMIN
-User=1000
-Group=1000
-
-[Service]
-Restart=always
-# Extend Timeout to allow time to pull the image
-TimeoutStartSec=900
-
-[Install]
-# Start by default on boot
-WantedBy=multi-user.target default.target
(llama.cpp server container unit)
@@ -2,21 +2,29 @@
 Description=A Llama CPP Server Running GPT OSS 120b
 
 [Container]
+# Shared AI pod
 Pod=ai.pod
-Image=localhost/llama-cpp-vulkan:2026-01-12-10-13-30
-Volume=llama-server-cache:/root/.cache
+
+# Image is built locally via podman build
+Image=localhost/llama-cpp-vulkan:latest
+
+# Downloaded models volume
+Volume=/home/ai/models/text:/models:z
+
+# GPU Device
 AddDevice=/dev/kfd
 AddDevice=/dev/dri
 
-Exec=-hf ggml-org/gpt-oss-120b-GGUF \
-  --ctx-size 32000 \
-  --jinja \
-  -ub 2048 \
-  -b 2048 \
-  --port 8000 \
-  --host 0.0.0.0 \
-  -n -1 \
-  --n-gpu-layers 999
+# Server command
+Exec=--port 8000 \
+  -c 0 \
+  -b 2048 \
+  -ub 2048 \
+  --perf \
+  --n-gpu-layers all \
+  --jinja \
+  --models-max 1 \
+  --models-dir /models
 
 [Service]
 Restart=always
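
With --models-dir /models and --models-max 1, llama-server appears to run in its multi-model router mode: it discovers GGUF files under /models and loads at most one at a time on demand, instead of pinning the single -hf model it ran before. A smoke test against the OpenAI-compatible API on the published port (which model ids appear depends on what sits in /home/ai/models/text):

    # List the models the server discovered
    curl -s http://localhost:8000/v1/models
    # Chat completion; substitute an id returned by the call above
    curl -s http://localhost:8000/v1/chat/completions \
      -H 'Content-Type: application/json' \
      -d '{"model": "<model-id>", "messages": [{"role": "user", "content": "hello"}]}'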
(deleted: Ollama container unit)
@@ -1,19 +0,0 @@
-[Unit]
-Description=An Ollama Server
-
-[Container]
-Pod=ai.pod
-Image=docker.io/ollama/ollama:0.13.5
-Volume=ollama:/root/.ollama
-AddDevice=/dev/kfd
-AddDevice=/dev/dri
-Environment=OLLAMA_VULKAN=1
-
-[Service]
-Restart=always
-# Extend Timeout to allow time to pull the image
-TimeoutStartSec=900
-
-[Install]
-# Start by default on boot
-WantedBy=multi-user.target default.target
(Open WebUI container unit)
@@ -2,8 +2,13 @@
 Description=An Open Webui Frontend for Local AI Services
 
 [Container]
+# Shared AI pod
 Pod=ai.pod
+
+# Open Webui base image
 Image=ghcr.io/open-webui/open-webui:main
+
+# Nothing too complicated here. Open Webui will basically configure itself.
 Volume=open-webui-data:/app/backend/data
 
 [Service]
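
Open WebUI keeps all of its state under /app/backend/data, so the named volume is the only thing to back up. A quick liveness probe once the pod is running (a sketch assuming the /health endpoint shipped in current Open WebUI releases; verify against your image):

    curl -s http://localhost:8080/health   # expects a small JSON status payload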
(new: stable-diffusion.cpp image-editing server unit)
@@ -0,0 +1,41 @@
+[Unit]
+Description=A Stable Diffusion CPP Server for Editing Images
+
+[Container]
+# Shared AI pod
+Pod=ai.pod
+
+# Vulkan image for AMD GPU
+Image=localhost/stable-diffusion-cpp:latest
+
+# Shared models directory
+Volume=/home/ai/models:/models:z
+
+# GPU Device
+AddDevice=/dev/kfd
+AddDevice=/dev/dri
+
+# Override entrypoint to use server
+Entrypoint=/sd-server
+
+# Server args
+Exec=-l 0.0.0.0 \
+  --listen-port 1235 \
+  --diffusion-model /models/image/flux-1-kontext/flux1-kontext-dev-Q4_K_M.gguf \
+  --vae /models/image/flux-1-kontext/ae.safetensors \
+  --clip_l /models/image/flux-1-kontext/clip_l.safetensors \
+  --t5xxl /models/image/flux-1-kontext/t5xxl_fp16.safetensors \
+  --cfg-scale 1.0 \
+  --sampling-method euler \
+  -v \
+  --seed -1 \
+  --steps 28
+
+[Service]
+Restart=always
+# Extend Timeout to allow time to pull the image
+TimeoutStartSec=900
+
+[Install]
+# Start by default on boot
+WantedBy=multi-user.target default.target
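
Note that this editing server listens on 1235, a port ai.pod does not publish, so it is reachable only from inside the pod; sibling containers can hit it on localhost because pod members share one network namespace. A sketch of probing it through a sibling (the container name assumes Quadlet's default systemd- prefix and an open-webui.container file, and that the image ships curl; adjust for your host):

    podman exec systemd-open-webui curl -s -o /dev/null -w '%{http_code}\n' http://localhost:1235/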
(new: stable-diffusion.cpp image-generation server unit)
@@ -0,0 +1,39 @@
+[Unit]
+Description=A Stable Diffusion CPP Server for Generating Images
+
+[Container]
+# Shared AI pod
+Pod=ai.pod
+
+# Vulkan image for AMD GPU
+Image=localhost/stable-diffusion-cpp:latest
+
+# Shared models directory
+Volume=/home/ai/models:/models:z
+
+# GPU Device
+AddDevice=/dev/kfd
+AddDevice=/dev/dri
+
+# Override entrypoint to use server
+Entrypoint=/sd-server
+
+# Server args
+Exec=-l 0.0.0.0 \
+  --listen-port 1234 \
+  --diffusion-model /models/image/z-turbo/z_image_turbo-Q4_K.gguf \
+  --vae /models/image/z-turbo/ae.safetensors \
+  --llm /models/image/z-turbo/qwen_3_4b.safetensors \
+  --cfg-scale 1.0 \
+  -v \
+  --seed -1 \
+  --steps 8
+
+[Service]
+Restart=always
+# Extend Timeout to allow time to pull the image
+TimeoutStartSec=900
+
+[Install]
+# Start by default on boot
+WantedBy=multi-user.target default.target
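
This generation server is the one exposed through the pod's published port 1234. The sd-server HTTP API surface varies between stable-diffusion.cpp builds, so check the binary's --help before scripting against it; startup and model loading (slow on first start, which TimeoutStartSec=900 already allows for) can be watched in the journal. The unit name below assumes the Quadlet file is stable-diffusion.container, which is a guess:

    journalctl --user -u stable-diffusion.service -f                  # watch the model load
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:1234/   # port answers once up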