From 395d064c37927d46152be64c5053235b079ced11 Mon Sep 17 00:00:00 2001
From: ducoterra
Date: Mon, 9 Dec 2024 00:30:51 -0500
Subject: [PATCH] various updates across several ai/vm related systems

---
 docker/graduated/nextcloud/nextcloud-aio.md                        | 18 ++++++++++++++++++
 .../graduated/fedora/fedora-kinoite.md                             | 21 +++++++++++++++++++++
 .../truenas/{README.md => truenas.md}                              |  0
 podman/graduated/localai/localai.md                                |  7 +++----
 podman/graduated/ollama/ollama.md                                  | 13 +++++++++++++
 5 files changed, 55 insertions(+), 4 deletions(-)
 rename infrastructure/graduated/truenas/{README.md => truenas.md} (100%)

diff --git a/docker/graduated/nextcloud/nextcloud-aio.md b/docker/graduated/nextcloud/nextcloud-aio.md
index c523b5e..0f599f1 100644
--- a/docker/graduated/nextcloud/nextcloud-aio.md
+++ b/docker/graduated/nextcloud/nextcloud-aio.md
@@ -14,6 +14,7 @@
   - [Changing the domain](#changing-the-domain)
   - [Uninstall](#uninstall)
   - [Edit QCOW](#edit-qcow)
+  - [Stuck in login screen](#stuck-in-login-screen)
 
 
 
@@ -151,3 +152,20 @@ sudo modprobe nbd
 sudo qemu-nbd -c /dev/nbd0 --read-only /path/to/image.qcow2
 udisksctl mount -b /dev/nbd0p1
 ```
+
+## Stuck in login screen
+
+Check the logs at `/var/www/html/data/nextcloud.log` in the `nextcloud-aio-nextcloud` container.
+
+Sometimes this is caused by a broken app or two-factor auth. Try:
+
+```bash
+# Check and disable two-factor for the affected user (<user> is a placeholder)
+./occ twofactorauth:state <user>
+./occ twofactorauth:disable <user> totp
+```
+
+```bash
+# Disable the problem app
+./occ app:disable integration_openai
+```
diff --git a/infrastructure/graduated/fedora/fedora-kinoite.md b/infrastructure/graduated/fedora/fedora-kinoite.md
index 3268a15..aedc51f 100644
--- a/infrastructure/graduated/fedora/fedora-kinoite.md
+++ b/infrastructure/graduated/fedora/fedora-kinoite.md
@@ -2,6 +2,7 @@
 
 - [Fedora Kinoite](#fedora-kinoite)
   - [Podman](#podman)
+    - [Autostarting services with quadlets](#autostarting-services-with-quadlets)
   - [Libvirt, Qemu, KVM](#libvirt-qemu-kvm)
   - [Network](#network)
     - [VLAN Setup with nmcli](#vlan-setup-with-nmcli)
@@ -58,6 +59,26 @@ export REGISTRY_AUTH_FILE=$HOME/.podman-auth.json
 ```
 
 Source that and then run `podman login` to create the file.
+
+### Autostarting services with quadlets
+
+If you want to run a container as your user at boot (as a user-level systemd service; think ollama), you can
+create a user quadlet like so:
+
+```bash
+# Generate the .container file
+podman run --rm ghcr.io/containers/podlet --install --description "Ollama" \
+podman run \
+-d \
+-v ollama:/root/.ollama \
+-p 11434:11434 \
+--name ollama \
+docker.io/ollama/ollama > ~/.config/containers/systemd/ollama.container
+
+# Start the service
+systemctl --user daemon-reload
+systemctl --user start ollama
+```
 ## Libvirt, Qemu, KVM
 
 ```bash
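A quick way to sanity-check the quadlet generated above (a sketch: it assumes the `ollama.container` unit and the 11434 port mapping from the hunk; `loginctl enable-linger` only matters if the unit should start before first login):

```bash
# List the unit that quadlet generated from ~/.config/containers/systemd/ollama.container
systemctl --user list-unit-files 'ollama*'

# Let user units start at boot, before first login
loginctl enable-linger $USER

# Confirm the container answers on its published port
curl http://localhost:11434/api/version
```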
diff --git a/infrastructure/graduated/truenas/README.md b/infrastructure/graduated/truenas/truenas.md
similarity index 100%
rename from infrastructure/graduated/truenas/README.md
rename to infrastructure/graduated/truenas/truenas.md
diff --git a/podman/graduated/localai/localai.md b/podman/graduated/localai/localai.md
index 76ae357..f51ca67 100644
--- a/podman/graduated/localai/localai.md
+++ b/podman/graduated/localai/localai.md
@@ -123,15 +123,15 @@ mkdir -p $MODEL_DIR
 
 # LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
 # LOCALAI_API_KEY will set an API key, omit to run unprotected.
+# HF_TOKEN will set a login token for Hugging Face
 # Good for single-gpu systems.
 # Use the below to generate a quadlet for /etc/containers/systemd/local-ai.container
 # podman run --rm ghcr.io/containers/podlet --install --description "Local AI" \
 podman run \
 -d \
 -p 8080:8080 \
--e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
--e HF_TOKEN=$(cat ~/.cache/huggingface/token) \
 -e LOCALAI_API_KEY=$(cat ~/.localai/token) \
+-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
 --device /dev/dri \
 --device /dev/kfd \
 --name local-ai \
@@ -146,12 +146,11 @@ quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
 podman run \
 -d \
 -p 8081:8080 \
--e HF_TOKEN=$(cat ~/.cache/huggingface/token) \
 --name local-ai-webui \
 --network local-ai \
 -v $MODEL_DIR:/build/models \
 -v localai-tmp:/tmp/generated \
-quay.io/go-skynet/local-ai:master-ffmpeg
+quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
 ```
 
 ## Running Anything LLM
diff --git a/podman/graduated/ollama/ollama.md b/podman/graduated/ollama/ollama.md
index a269d15..588b8aa 100644
--- a/podman/graduated/ollama/ollama.md
+++ b/podman/graduated/ollama/ollama.md
@@ -6,6 +6,7 @@
   - [Unsticking models stuck in "Stopping"](#unsticking-models-stuck-in-stopping)
   - [Run Anything LLM Interface](#run-anything-llm-interface)
   - [Installing External Service with Nginx and Certbot](#installing-external-service-with-nginx-and-certbot)
+  - [Ollama Models](#ollama-models)
   - [Custom Models](#custom-models)
     - [From Existing Model](#from-existing-model)
     - [From Scratch](#from-scratch)
@@ -48,7 +49,15 @@ Note your ollama instance will be available to podman containers via `http://host.containers.internal:11434`
 ## Install and run Ollama with Podman
 
 ```bash
+# AMD
+# Use the below to generate a quadlet for /etc/containers/systemd/ollama.container
+# podman run --rm ghcr.io/containers/podlet --install --description "Ollama" \
 podman run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 11434:11434 --name ollama docker.io/ollama/ollama:rocm
+
+# CPU
+# Use the below to generate a quadlet for /etc/containers/systemd/ollama.container
+# podman run --rm ghcr.io/containers/podlet --install --description "Ollama" \
+podman run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama docker.io/ollama/ollama
 ```
 
 ## Unsticking models stuck in "Stopping"
@@ -245,6 +254,10 @@ Also consider that podman will not restart your containers at boot. You'll need
 from the podman run commands. Check out the comments above the podman run commands for more info.
 Also search the web for "podman quadlets" or ask your AI about it!
 
+## Ollama Models
+
+
+
 ## Custom Models
 
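The new "Ollama Models" section above is added as an empty stub; a possible starting point, sketched with `llama3.2` standing in as an example model name and `ollama` as the container name from the run commands above:

```bash
# Pull a model into the ollama volume, list what's installed, then run a prompt
podman exec -it ollama ollama pull llama3.2
podman exec -it ollama ollama list
podman exec -it ollama ollama run llama3.2 "Hello"
```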