homelab/active/software_ai_stack/llama-embed.container

[Unit]
Description=llama.cpp server for embedding models
[Container]
# Shared AI internal pod
Pod=ai-internal.pod
# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
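# e.g. (tag and build context assumed): podman build -t llama-cpp-vulkan .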
# Downloaded models volume
Volume=/home/ai/models/embedding:/models:z
# GPU Device
AddDevice=/dev/kfd
AddDevice=/dev/dri
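# /dev/kfd is the AMD ROCm compute interface; /dev/dri exposes the render nodes Vulkan uses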
# Server command
Exec=--port 8001 \
-c 0 \
--perf \
--n-gpu-layers all \
--models-max 1 \
--models-dir /models \
--embedding \
-m /models/qwen3-embed-4b/Qwen3-Embedding-4B-Q8_0.gguf \
--alias embed
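# -c 0 takes the context length from the model's own metadata;
# --alias lets clients request this model as "embed" via the API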
# Health Check
HealthCmd=curl --fail http://127.0.0.1:8001/props || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
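# Kill the container when it turns unhealthy; Restart=always below brings it back up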
HealthOnFailure=kill
[Service]
Restart=always
# Extend the start timeout: the image is local, but loading the model can take a while
TimeoutStartSec=900
[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
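
Once the file is installed (typically ~/.config/containers/systemd/ for a user unit, /etc/containers/systemd/ for a system one), the service can be started and smoke-tested roughly as below. This is a sketch: the embedding payload is only illustrative, `--user` assumes a rootless setup, and the curl assumes port 8001 is reachable on the host through the ai-internal pod.

systemctl --user daemon-reload
systemctl --user start llama-embed.service
curl http://127.0.0.1:8001/v1/embeddings \
  -H 'Content-Type: application/json' \
  -d '{"model": "embed", "input": "hello world"}'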