Files
homelab/active/software_ai_stack/llama-embed.container
ducoterra f2015e2c71
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m3s
checkpoint commit
2026-05-05 06:26:40 -04:00

45 lines
968 B
Plaintext

# llama-embed.container — Podman Quadlet unit that runs a llama.cpp server
# serving an embedding model. Joins the shared internal AI pod (no internet
# access) and exposes the OpenAI-compatible /v1/embeddings API on port 8001
# inside the pod. Consumed by systemd via podman-systemd.unit(5) (Quadlet).
[Unit]
Description=A Llama CPP Server For Embedding Models
[Container]
# Shared AI internal pod without internet access
Pod=ai-internal.pod
# Image is built locally via podman build (never pulled from a registry)
Image=localhost/llama-cpp-vulkan:latest
# Downloaded models volume; :z relabels for SELinux shared access
Volume=/home/ai/models/embedding:/models:z
# GPU devices: /dev/kfd (AMD ROCm compute) and /dev/dri (render nodes)
AddDevice=/dev/kfd
AddDevice=/dev/dri
# Server command — arguments appended to the image's llama-server entrypoint.
# Backslash continuations are joined by systemd before Quadlet parses them.
#   --port 8001      listen port inside the pod
#   -c 0             context size 0 = use the model's trained context length
#   -b / -ub 1024    logical / physical batch sizes
#   --embedding      run in embedding mode (pooled output, /v1/embeddings)
#   --alias embed    model name reported by the API
# NOTE(review): llama-server documents --no-perf; confirm this build accepts
# a bare --perf flag, otherwise startup will fail on an unknown argument.
# NOTE(review): "--n-gpu-layers all" (offload every layer) is only accepted
# by newer llama.cpp builds; older ones require a numeric value — confirm.
Exec=--port 8001 \
-c 0 \
-b 1024 \
-ub 1024 \
--perf \
--n-gpu-layers all \
--embedding \
-m /models/emebeddinggemma-300m/embeddinggemma-300M-BF16.gguf \
--alias embed
# NOTE(review): the directory is spelled "emebeddinggemma-300m" while the
# file inside is "embeddinggemma-300M-BF16.gguf" — verify the on-disk
# directory name actually carries this spelling before "fixing" the path.
# Health Check — curl runs INSIDE the container, so the image must ship curl.
# "CMD-SHELL" is the Docker-compat prefix meaning "run via /bin/sh -c";
# presumably supported by the podman version in use — verify.
HealthCmd=CMD-SHELL curl --fail http://127.0.0.1:8001/health || exit 1
HealthInterval=10s
HealthRetries=3
HealthStartPeriod=10s
HealthTimeout=30s
# Kill the container on health failure so Restart=always below relaunches it
HealthOnFailure=kill
# API keys kept out of the unit file; presumably read by llama-server for
# request authentication — verify the variable names against keys.env
EnvironmentFile=/home/ai/.llama-api/keys.env
[Service]
Restart=always
# Extend Timeout to allow time to pull the image
TimeoutStartSec=900
[Install]
# Start by default on boot (default.target covers rootless user sessions)
WantedBy=multi-user.target default.target