[Unit]
Description=A llama.cpp server for embedding models

[Container]
# Shared AI internal pod without internet access
Pod=ai-internal.pod
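# (ai-internal.pod is assumed to be a companion Quadlet .pod unit; Quadlet
# resolves Pod= references to .pod files when it generates the service)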

# Image is built locally via podman build
Image=localhost/llama-cpp-vulkan:latest
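# (the localhost/ prefix marks a locally built image, so podman resolves it
# from local storage instead of a remote registry)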

# Downloaded models volume
Volume=/home/ai/models/embedding:/models:z
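# (:z applies a shared SELinux relabel so the container can read the host dir)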

# GPU devices
AddDevice=/dev/kfd
AddDevice=/dev/dri
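# (/dev/dri exposes the GPU render nodes used by Vulkan; /dev/kfd is the AMD
# ROCm compute interface, presumably passed through as well for completeness)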

# Server command
Exec=--port 8001 \
    -c 0 \
    -b 1024 \
    -ub 1024 \
    --perf \
    --n-gpu-layers all \
    --embedding \
    -m /models/embeddinggemma-300m/embeddinggemma-300M-BF16.gguf \
    --alias embed
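# (these arguments are appended to the image's entrypoint, presumably
# llama-server: -c 0 takes the context size from the model's metadata,
# -b/-ub set the logical/physical batch sizes, and --alias sets the model
# name reported by the API)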

# Health check
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8001/health || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
HealthOnFailure=kill
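# (when the check fails, HealthOnFailure=kill makes podman kill the
# container, and Restart=always below lets systemd start a fresh one)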

EnvironmentFile=/home/ai/.llama-api/keys.env
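# (injects the file's KEY=value pairs, presumably the server's API keys,
# into the container environment)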

[Service]
Restart=always
# Extend the start timeout to allow time to pull the image
TimeoutStartSec=900

[Install]
# Start by default on boot
WantedBy=multi-user.target default.target
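
# Usage sketch, assuming this file is installed as a rootless Quadlet at
# e.g. ~/.config/containers/systemd/llama-embed.container (the name
# "llama-embed" is hypothetical; the generated service takes the file name):
#   systemctl --user daemon-reload
#   systemctl --user start llama-embed.service
#   curl http://127.0.0.1:8001/health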