framework 16 ai updates
This commit is contained in:
35
active/device_framework_16/quadlets/llama-embed.container
Normal file
35
active/device_framework_16/quadlets/llama-embed.container
Normal file
@@ -0,0 +1,35 @@
|
||||
[Unit]
|
||||
Description=A Llama CPP Embedding Server Running Nomic Embed Text v2 MoE
|
||||
|
||||
[Container]
|
||||
# Image is built locally via podman build
|
||||
Image=localhost/llama-cpp-vulkan:latest
|
||||
|
||||
# Downloaded models volume
|
||||
Volume=/home/ai/models:/models:z
|
||||
|
||||
# Ports
|
||||
PublishPort=8010:8010
|
||||
|
||||
# GPU Device
|
||||
AddDevice=/dev/kfd
|
||||
AddDevice=/dev/dri
|
||||
|
||||
# Server command
|
||||
Exec=--port 8010 \
|
||||
-m /models/embedding/nomic-embed-text-v2/nomic-embed-text-v2-moe-q8_0.gguf \
|
||||
-ngl all \
|
||||
-ub 2048 \
|
||||
-b 2048 \
|
||||
--ctx-size 2048 \
|
||||
--embeddings \
|
||||
--models-dir /models
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
# Extend timeout to allow time for startup (the image is built locally, so the
# long pole is loading the embedding model, not pulling the image)
|
||||
TimeoutStartSec=900
|
||||
|
||||
[Install]
|
||||
# Start by default on boot
|
||||
WantedBy=multi-user.target default.target
|
||||
Reference in New Issue
Block a user