add local ai podman docs
This commit is contained in:
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
||||||
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
||||||
|
- [Podman](#podman)
|
||||||
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
||||||
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
||||||
- [Running Anything LLM](#running-anything-llm)
|
- [Running Anything LLM](#running-anything-llm)
|
||||||
@@ -32,6 +33,12 @@
|
|||||||
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
||||||
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
||||||
|
|
||||||
|
## Podman
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run -ti --name local-ai -p 8081:8080 --device=/dev/kfd --device=/dev/dri --group-add=video --replace localai/localai:latest-gpu-vulkan
|
||||||
|
```
|
||||||
|
|
||||||
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -124,7 +131,7 @@ pipx install "huggingface_hub[cli]"
|
|||||||
podman network create --ipv6 --label local-ai local-ai
|
podman network create --ipv6 --label local-ai local-ai
|
||||||
|
|
||||||
# You might want to mount an external drive here.
|
# You might want to mount an external drive here.
|
||||||
export MODEL_DIR=/models
|
export MODEL_DIR=/srv/models
|
||||||
mkdir -p $MODEL_DIR
|
mkdir -p $MODEL_DIR
|
||||||
|
|
||||||
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
||||||
@@ -136,14 +143,13 @@ mkdir -p $MODEL_DIR
|
|||||||
podman run \
|
podman run \
|
||||||
-d \
|
-d \
|
||||||
-p 8080:8080 \
|
-p 8080:8080 \
|
||||||
-e LOCALAI_API_KEY=$(cat ~/.localai/token) \
|
|
||||||
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
||||||
--device /dev/dri \
|
--device /dev/dri \
|
||||||
--device /dev/kfd \
|
--device /dev/kfd \
|
||||||
--name local-ai \
|
--name local-ai \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
|
|
||||||
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
||||||
@@ -153,9 +159,9 @@ podman run \
|
|||||||
-d \
|
-d \
|
||||||
-p 8081:8080 \
|
-p 8081:8080 \
|
||||||
--name local-ai-webui \
|
--name local-ai-webui \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user