[Unit]
Description=A Llama CPP Server Running GPT OSS 120b

[Container]
# Shared AI pod
Pod=ai.pod

# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest

# Downloaded models volume
Volume=/home/ai/models/text:/models:z

# GPU devices
AddDevice=/dev/kfd
AddDevice=/dev/dri

# Server command
Exec=--port 8000 \
     -c 0 \
     -b 2048 \
     -ub 2048 \
     --perf \
     --n-gpu-layers all \
     --jinja \
     --models-max 1 \
     --models-dir /models

[Service]
Restart=always
# Extend the start timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
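The container joins a shared pod via Pod=ai.pod, whose Quadlet file is not shown above. A minimal sketch of what such a .pod unit could look like (the pod name and published port are assumptions, not taken from the original):

[Pod]
PodName=ai
# Expose the llama.cpp server to the host (assumed port mapping)
PublishPort=8000:8000

With both Quadlet files placed in a Quadlet directory (for example /etc/containers/systemd/ for system units, or ~/.config/containers/systemd/ for rootless ones), running systemctl daemon-reload regenerates the services, which can then be started and inspected like any other systemd unit.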