Add initial framework desktop config
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m13s
This commit is contained in:
active/device_framework_desktop/quadlets/ai.pod (new file, 9 lines)
@@ -0,0 +1,9 @@
[Pod]
# llama.cpp
PublishPort=8000:8000/tcp
# open-webui
PublishPort=8080:8080/tcp
# anything-llm
PublishPort=3001:3001/tcp
# ollama
PublishPort=11434:11434/tcp
@@ -0,0 +1,21 @@
[Unit]
Description=An Anything LLM Frontend for Local AI Services

[Container]
Pod=ai.pod
Image=docker.io/mintplexlabs/anythingllm
Volume=anythingllm:/app/server/storage
Volume=/home/ai/anything-llm/.env:/app/server/.env:z
Environment=STORAGE_DIR=/app/server/storage
AddCapability=SYS_ADMIN
User=1000
Group=1000

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
@@ -0,0 +1,28 @@
[Unit]
Description=A Llama CPP Server Running GPT OSS 120b

[Container]
Pod=ai.pod
Image=localhost/llama-cpp-vulkan:2026-01-12-10-13-30
Volume=llama-server-cache:/root/.cache
AddDevice=/dev/kfd
AddDevice=/dev/dri

Exec=-hf ggml-org/gpt-oss-120b-GGUF \
    --ctx-size 32000 \
    --jinja \
    -ub 2048 \
    -b 2048 \
    --port 8000 \
    --host 0.0.0.0 \
    -n -1 \
    --n-gpu-layers 999

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
@@ -0,0 +1,19 @@
[Unit]
Description=An Ollama Server

[Container]
Pod=ai.pod
Image=docker.io/ollama/ollama:0.13.5
Volume=ollama:/root/.ollama
AddDevice=/dev/kfd
AddDevice=/dev/dri
Environment=OLLAMA_VULKAN=1

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
@@ -0,0 +1,16 @@
[Unit]
Description=An Open Webui Frontend for Local AI Services

[Container]
Pod=ai.pod
Image=ghcr.io/open-webui/open-webui:main
Volume=open-webui-data:/app/backend/data

[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
Reference in New Issue
Block a user