homelab/active/software_ai_stack/llama-think.container

[Unit]
Description=A llama.cpp Vulkan server running Gemma 4 26B A4B (alias: think)
[Container]
# Shared AI internal pod
Pod=ai-internal.pod
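# (ai-internal.pod is a sibling Quadlet .pod unit; containers in the pod
# share one network namespace, so other pod members reach this server on
# 127.0.0.1:8000)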
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
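# Assumed build step (not part of this unit; the Containerfile name here is
# hypothetical), e.g.:
#   podman build -t llama-cpp-vulkan:latest -f Containerfile.vulkan .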
# Downloaded models volume
Volume=/home/ai/models/text:/models:z
# GPU device nodes: /dev/kfd (AMD compute/ROCm interface) and
# /dev/dri (DRM render nodes used by Vulkan)
AddDevice=/dev/kfd
AddDevice=/dev/dri
# Server arguments (appended to the image's llama-server entrypoint)
Exec=--port 8000 \
-c 128000 \
--top-k 64 \
--top-p 0.95 \
--temp 1.0 \
-v \
--n-gpu-layers 999 \
--jinja \
-m /models/gemma-4-26b-a4b/gemma-4-26B-A4B-it-UD-Q8_K_XL.gguf \
--mmproj /models/gemma-4-26b-a4b/mmproj-BF16.gguf \
--alias think
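# Example request once the server is up (assumes port 8000 is reachable from
# the host; clients pass the --alias value as the model name):
#   curl http://127.0.0.1:8000/v1/chat/completions \
#     -H 'Content-Type: application/json' \
#     -d '{"model": "think", "messages": [{"role": "user", "content": "hello"}]}'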
# Health check (runs inside the container; curl must be present in the image)
HealthCmd=curl --fail http://127.0.0.1:8000/health || exit 1
HealthInterval=10s
HealthRetries=3
# Allow time for the model to load before health failures count
HealthStartPeriod=5m
HealthTimeout=30s
HealthOnFailure=kill
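# When the check turns unhealthy podman kills the container, and
# Restart=always below brings up a fresh instance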
[Service]
Restart=always
# Extend the start timeout so a slow first start (e.g. rebuilding the image
# or loading the model) is not killed
TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
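# Deployment sketch (assuming a rootful Quadlet setup):
#   cp llama-think.container /etc/containers/systemd/
#   systemctl daemon-reload    # Quadlet generates llama-think.service
#   systemctl start llama-think.service
#   curl http://127.0.0.1:8000/health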