# Podman Quadlet container unit for a local llama.cpp server (Vulkan build).
# Processed by quadlet at boot to generate a systemd service.
[Unit]
Description=A llama.cpp server running Qwen2.5-Coder 1.5B Instruct (q8_0)
[Container]
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest

# Downloaded models volume
Volume=/home/ai/models:/models:z

# Ports
PublishPort=8012:8012

# GPU devices (AMD ROCm compute + DRI render nodes)
AddDevice=/dev/kfd
AddDevice=/dev/dri

# Server command (arguments appended to the image's llama-server entrypoint)
# NOTE(review): "--perf", "--n-gpu-layers all", and "--models-dir" are
# version-dependent llama-server flags — confirm this build accepts them
# (older builds require a numeric value for --n-gpu-layers).
Exec=--port 8012 \
    -c 0 \
    --perf \
    --n-gpu-layers all \
    --models-dir /models \
    -m /models/text/qwen2.5-coder-1.5b-instruct/qwen2.5-coder-1.5b-instruct-q8_0.gguf

[Service]
Restart=always
# Extend timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target