reconfigure software ai stack
This commit is contained in:
51
active/software_ai_stack/llama-instruct.container
Normal file
51
active/software_ai_stack/llama-instruct.container
Normal file
@@ -0,0 +1,51 @@
|
||||
[Unit]
|
||||
Description=A Llama CPP Server Running Qwen3.5 35B A3B Instruct
|
||||
|
||||
[Container]
|
||||
# Shared AI internal pod
|
||||
Pod=ai-internal.pod
|
||||
|
||||
# Image is built locally via podman build
|
||||
Image=localhost/llama-cpp-vulkan:latest
|
||||
|
||||
# Downloaded models volume
|
||||
Volume=/home/ai/models/text:/models:z
|
||||
|
||||
# GPU Device
|
||||
AddDevice=/dev/kfd
|
||||
AddDevice=/dev/dri
|
||||
|
||||
# Server command
|
||||
Exec=--port 8002 \
|
||||
-c 16000 \
|
||||
--perf \
|
||||
-v \
|
||||
--top-k 20 \
|
||||
--top-p 0.8 \
|
||||
--min-p 0 \
|
||||
--presence-penalty 1.5 \
|
||||
--repeat-penalty 1 \
|
||||
--temp 0.7 \
|
||||
--n-gpu-layers all \
|
||||
--jinja \
|
||||
--chat-template-kwargs '{"enable_thinking": false}' \
|
||||
-m /models/qwen3.5-35b-a3b/Qwen3.5-35B-A3B-Q8_0.gguf \
|
||||
--mmproj /models/qwen3.5-35b-a3b/mmproj-F16.gguf \
|
||||
--alias instruct
|
||||
|
||||
# Health Check
|
||||
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8002/health || exit 1
|
||||
HealthInterval=10s
|
||||
HealthRetries=3
|
||||
HealthStartPeriod=10s
|
||||
HealthTimeout=30s
|
||||
HealthOnFailure=kill
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
# Extend timeout to allow time to load the model at startup (image is built locally)
|
||||
TimeoutStartSec=900
|
||||
|
||||
[Install]
|
||||
# Start by default on boot
|
||||
WantedBy=multi-user.target default.target
|
||||
Reference in New Issue
Block a user