Compare commits
70 Commits
d4571c9b70
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
acf6421b53
|
|||
|
875795a409
|
|||
|
b9d1c2a9a3
|
|||
|
6f8b7ffca6
|
|||
|
cc75227a77
|
|||
|
9ae82fc3de
|
|||
|
92edf49948
|
|||
|
25d3a7805c
|
|||
|
eb67191706
|
|||
|
d51560f979
|
|||
|
88ecb458e1
|
|||
|
31739320aa
|
|||
|
f70028cf63
|
|||
|
ecf4ae1058
|
|||
|
eff2aa4066
|
|||
|
a53e67653d
|
|||
|
d48b9a66cb
|
|||
|
2c5af8507c
|
|||
|
ba66c47719
|
|||
|
da0b06768e
|
|||
|
1c6e1b7032
|
|||
|
087d8888cf
|
|||
|
cb486ae289
|
|||
|
cd56318ab0
|
|||
|
416321206d
|
|||
|
f3c313e610
|
|||
|
52c6dac263
|
|||
|
d4fbbb185f
|
|||
|
7d2e8b6b7b
|
|||
|
3bfa67e605
|
|||
|
d929ac8888
|
|||
|
837ea91a52
|
|||
|
71a27b1b91
|
|||
|
8d250318b1
|
|||
|
041fbd0f5f
|
|||
|
498e52c134
|
|||
|
dd11ef60cb
|
|||
|
65208987ea
|
|||
|
f530181e73
|
|||
|
bcd087dc94
|
|||
|
59bdafed6a
|
|||
|
2a7a177cec
|
|||
|
fdc0f0f2c5
|
|||
|
27180d92b1
|
|||
|
2b62ad0956
|
|||
|
fb19c81d47
|
|||
|
1d39fb4dc6
|
|||
|
03c8e95275
|
|||
|
90105f8997
|
|||
|
c1e083ec04
|
|||
|
5b79da95fd
|
|||
|
1737c7a79a
|
|||
|
8ea3fe5fc4
|
|||
|
73d78db715
|
|||
|
e7835970a2
|
|||
|
f242895b51
|
|||
|
70718f209b
|
|||
|
d089c981c8
|
|||
|
495cacfb96
|
|||
|
90ba436a47
|
|||
|
3f2ac15591
|
|||
|
2559aebd5d
|
|||
|
ddb9720800
|
|||
|
525e14965d
|
|||
|
7626cdf998
|
|||
|
40f221376f
|
|||
|
27e9c42d4c
|
|||
|
43159dca7e
|
|||
|
57dcd6a806
|
|||
|
463595af75
|
@@ -3,10 +3,10 @@ run-name: Build and Push the Custom Caddy Image with Route53 DNS Certbot
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- active/podman_caddy/**
|
- active/container_caddy/**
|
||||||
- .gitea/workflows/caddy.yaml
|
- .gitea/workflows/caddy.yaml
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '@daily'
|
- cron: "@daily"
|
||||||
jobs:
|
jobs:
|
||||||
build-and-push-ddns:
|
build-and-push-ddns:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -23,8 +23,8 @@ jobs:
|
|||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: https://github.com/docker/build-push-action@v5
|
uses: https://github.com/docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: ${{ gitea.workspace }}/active/podman_caddy
|
context: ${{ gitea.workspace }}/active/container_caddy
|
||||||
file: ${{ gitea.workspace }}/active/podman_caddy/Containerfile
|
file: ${{ gitea.workspace }}/active/container_caddy/Containerfile
|
||||||
push: true
|
push: true
|
||||||
tags: "gitea.reeseapps.com/services/caddy:latest,gitea.reeseapps.com/services/caddy:${{gitea.sha}}"
|
tags: "gitea.reeseapps.com/services/caddy:latest,gitea.reeseapps.com/services/caddy:${{gitea.sha}}"
|
||||||
no-cache: true
|
no-cache: true
|
||||||
|
|||||||
@@ -3,10 +3,10 @@ run-name: Build and Push the Podman DDNS Image
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
paths:
|
paths:
|
||||||
- active/podman_ddns/**
|
- active/container_ddns/**
|
||||||
- .gitea/workflows/ddns.yaml
|
- .gitea/workflows/ddns.yaml
|
||||||
schedule:
|
schedule:
|
||||||
- cron: '@daily'
|
- cron: "@daily"
|
||||||
jobs:
|
jobs:
|
||||||
build-and-push-ddns:
|
build-and-push-ddns:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -23,8 +23,8 @@ jobs:
|
|||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: https://github.com/docker/build-push-action@v5
|
uses: https://github.com/docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: ${{ gitea.workspace }}/active/podman_ddns
|
context: ${{ gitea.workspace }}/active/container_ddns
|
||||||
file: ${{ gitea.workspace }}/active/podman_ddns/Containerfile
|
file: ${{ gitea.workspace }}/active/container_ddns/Containerfile
|
||||||
push: true
|
push: true
|
||||||
tags: "gitea.reeseapps.com/services/ddns:latest,gitea.reeseapps.com/services/ddns:${{gitea.sha}}"
|
tags: "gitea.reeseapps.com/services/ddns:latest,gitea.reeseapps.com/services/ddns:${{gitea.sha}}"
|
||||||
no-cache: true
|
no-cache: true
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -11,3 +11,4 @@ TODO.md
|
|||||||
eicar.com
|
eicar.com
|
||||||
*.pp
|
*.pp
|
||||||
*.mod
|
*.mod
|
||||||
|
*.log
|
||||||
17
.vscode/code_oss_extensions.txt
vendored
17
.vscode/code_oss_extensions.txt
vendored
@@ -14,4 +14,21 @@ stkb.rewrap
|
|||||||
streetsidesoftware.code-spell-checker
|
streetsidesoftware.code-spell-checker
|
||||||
tamasfe.even-better-toml
|
tamasfe.even-better-toml
|
||||||
vue.volar
|
vue.volar
|
||||||
|
yzhang.markdown-all-in-onecharliermarsh.ruff
|
||||||
|
eamodio.gitlens
|
||||||
|
franneck94.vscode-python-config
|
||||||
|
franneck94.vscode-python-dev-extension-pack
|
||||||
|
hashicorp.hcl
|
||||||
|
ms-pyright.pyright
|
||||||
|
ms-python.debugpy
|
||||||
|
ms-python.mypy-type-checker
|
||||||
|
ms-python.python
|
||||||
|
ms-python.vscode-python-envs
|
||||||
|
njpwerner.autodocstring
|
||||||
|
njqdev.vscode-python-typehint
|
||||||
|
redhat.vscode-yaml
|
||||||
|
stkb.rewrap
|
||||||
|
streetsidesoftware.code-spell-checker
|
||||||
|
tamasfe.even-better-toml
|
||||||
|
vue.volar
|
||||||
yzhang.markdown-all-in-one
|
yzhang.markdown-all-in-one
|
||||||
4
.vscode/vscode.md
vendored
4
.vscode/vscode.md
vendored
@@ -109,13 +109,13 @@ previous positions.
|
|||||||
To save a list of installed extensions run:
|
To save a list of installed extensions run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
code --list-extensions >> vscode_extensions.txt
|
code --list-extensions >> .vscode/code_oss_extensions.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
To install that list of extensions run:
|
To install that list of extensions run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cat vscode_extensions.txt | xargs -L 1 code --install-extension
|
cat .vscode/code_oss_extensions.txt | xargs -L 1 code --install-extension
|
||||||
```
|
```
|
||||||
|
|
||||||
### Continue
|
### Continue
|
||||||
|
|||||||
49
README.md
49
README.md
@@ -40,6 +40,7 @@ or give me access to your servers.
|
|||||||
- ["find ." shortcuts](#find--shortcuts)
|
- ["find ." shortcuts](#find--shortcuts)
|
||||||
- [tmux](#tmux)
|
- [tmux](#tmux)
|
||||||
- [bash](#bash)
|
- [bash](#bash)
|
||||||
|
- [Bulk File/Folder Renaming](#bulk-filefolder-renaming)
|
||||||
- [SSH Setup](#ssh-setup)
|
- [SSH Setup](#ssh-setup)
|
||||||
- [Git GPG Commit Signing](#git-gpg-commit-signing)
|
- [Git GPG Commit Signing](#git-gpg-commit-signing)
|
||||||
- [Important Dates and Times](#important-dates-and-times)
|
- [Important Dates and Times](#important-dates-and-times)
|
||||||
@@ -91,15 +92,15 @@ find . -type d -exec chmod 755 {} \;
|
|||||||
Here are some handy references for default bash variables
|
Here are some handy references for default bash variables
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$0 – The name of the script being executed.
|
`$0` – The name of the script being executed.
|
||||||
$1-$9 – The first nine command-line arguments.
|
`$1-$9` – The first nine command-line arguments.
|
||||||
$# – The number of command-line arguments.
|
`$#` – The number of command-line arguments.
|
||||||
$* – All command-line arguments as a single string.
|
`$*` – All command-line arguments as a single string.
|
||||||
$@ – All command-line arguments as an array.
|
`$@` – All command-line arguments as an array.
|
||||||
$? – The exit status of the last executed command.
|
`$?` – The exit status of the last executed command.
|
||||||
$$ – The process ID of the current shell.
|
`$$` – The process ID of the current shell.
|
||||||
$! – The process ID of the last background command.
|
`$!` – The process ID of the last background command.
|
||||||
$- – Shows the current shell options or flags.
|
`$-` – Shows the current shell options or flags.
|
||||||
```
|
```
|
||||||
|
|
||||||
And here are the meanings of the shell options
|
And here are the meanings of the shell options
|
||||||
@@ -118,6 +119,15 @@ So to check if you are in an interactive shell:
|
|||||||
[ $- == *i* ]] && Some command here
|
[ $- == *i* ]] && Some command here
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Bulk File/Folder Renaming
|
||||||
|
|
||||||
|
```bash
|
||||||
|
for change_dir in $(ls | grep 'podman_*'); do
|
||||||
|
new_name=$(echo $change_dir | sed 's/podman_/container_/g')
|
||||||
|
mv $change_dir $new_name`;
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
## SSH Setup
|
## SSH Setup
|
||||||
|
|
||||||
Generate a key (password protect it!)
|
Generate a key (password protect it!)
|
||||||
@@ -197,16 +207,15 @@ signed you'll see an output. If not, nothing will show.
|
|||||||
|
|
||||||
## Important Dates and Times
|
## Important Dates and Times
|
||||||
|
|
||||||
| Time | Day | Description |
|
| Time | Day | Description |
|
||||||
| ----- | -------- | ---------------------------------- |
|
| ----- | --- | ---------------------------------- |
|
||||||
| 00:00 | All | Automated builds |
|
| 00:00 | All | Automated builds |
|
||||||
| 00:00 | All | NAS Snapshots |
|
| 00:00 | All | NAS Snapshots |
|
||||||
| 02:00 | All | Backups |
|
| 02:00 | All | Backups |
|
||||||
| 04:00 | All | Bare Metal Server Security Updates |
|
| 04:00 | All | Bare Metal Server Security Updates |
|
||||||
| 05:00 | All | VM Server Security Updates |
|
| 05:00 | All | VM Server Security Updates |
|
||||||
| 05:00 | All | Unifi Protect Firmware Updates |
|
| 05:00 | All | Unifi Protect Firmware Updates |
|
||||||
| 06:00 | All | Unifi Network Firmware Updates |
|
| 06:00 | All | Unifi Network Firmware Updates |
|
||||||
| 06:00 | Saturday | Truenas Disk Scrub |
|
|
||||||
|
|
||||||
## Project Lifecycle
|
## Project Lifecycle
|
||||||
|
|
||||||
@@ -296,7 +305,7 @@ Assuming your project name is `my-project` and it runs on `podman`
|
|||||||
1. Create a new directory called `podman_my-project` under the `active`
|
1. Create a new directory called `podman_my-project` under the `active`
|
||||||
directory
|
directory
|
||||||
2. Copy the readme template: `cp project_readme_template.md
|
2. Copy the readme template: `cp project_readme_template.md
|
||||||
active/podman_my-project/my-project.md`
|
active/container_my-project/my-project.md`
|
||||||
3. Populate `my-project.md` as you work through the install process
|
3. Populate `my-project.md` as you work through the install process
|
||||||
4. Create a directory called `secrets` in `podman_my-project`. This will be
|
4. Create a directory called `secrets` in `podman_my-project`. This will be
|
||||||
automatically gitignored. Put all secrets here.
|
automatically gitignored. Put all secrets here.
|
||||||
|
|||||||
208
active/container_bifrost/bifrost.md
Normal file
208
active/container_bifrost/bifrost.md
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
# Podman bifrost
|
||||||
|
|
||||||
|
- [Podman bifrost](#podman-bifrost)
|
||||||
|
- [Setup bifrost Project](#setup-bifrost-project)
|
||||||
|
- [Install bifrost](#install-bifrost)
|
||||||
|
- [Create the ai user](#create-the-ai-user)
|
||||||
|
- [Write the bifrost compose spec](#write-the-bifrost-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert bifrost compose spec to quadlets](#convert-bifrost-compose-spec-to-quadlets)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose bifrost](#expose-bifrost)
|
||||||
|
- [Using bifrost](#using-bifrost)
|
||||||
|
- [Adding Models](#adding-models)
|
||||||
|
- [Testing Models](#testing-models)
|
||||||
|
- [Backup bifrost](#backup-bifrost)
|
||||||
|
- [Upgrade bifrost](#upgrade-bifrost)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup bifrost Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_bifrost
|
||||||
|
- [ ] Find and replace bifrost with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install bifrost
|
||||||
|
|
||||||
|
### Create the ai user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd ai
|
||||||
|
loginctl enable-linger $(id -u ai)
|
||||||
|
systemctl --user --machine=ai@.host enable podman-restart
|
||||||
|
systemctl --user --machine=ai@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/ai/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the bifrost compose spec
|
||||||
|
|
||||||
|
Edit the compose.yaml at active/container_bifrost/compose/compose.yaml
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/bifrost/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert bifrost compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_bifrost/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_bifrost/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=ai-ai
|
||||||
|
scp -r active/container_bifrost/quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart bifrost
|
||||||
|
journalctl --user -u bifrost -f
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose bifrost
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
## Using bifrost
|
||||||
|
|
||||||
|
### Adding Models
|
||||||
|
|
||||||
|
```json
|
||||||
|
// qwen3.5-35b-a3b-thinking
|
||||||
|
{
|
||||||
|
"temperature": 1,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-coding
|
||||||
|
{
|
||||||
|
"temperature": 0.6,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 0,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-instruct
|
||||||
|
{
|
||||||
|
"temperature": 0.7,
|
||||||
|
"top_p": 0.8,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing Models
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List models
|
||||||
|
curl -L -X GET 'https://aipi.reeseapps.com/v1/models' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234'
|
||||||
|
|
||||||
|
curl -L -X POST 'https://aipi.reeseapps.com/v1/chat/completions' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234' \
|
||||||
|
-d '{
|
||||||
|
"model": "gpt-4o-mini", # 👈 REPLACE with 'public model name' for any db-model
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"content": "Hey, how's it going",
|
||||||
|
"role": "user"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup bifrost
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade bifrost
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-bifrost-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER$:/home/bifrost/.config/containers/systemd/
|
||||||
|
ssh bifrost systemctl --user daemon-reload
|
||||||
|
ssh bifrost systemctl --user restart bifrost
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user bifrost is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel bifrost
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
32
active/container_bifrost/compose/compose.yaml
Normal file
32
active/container_bifrost/compose/compose.yaml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
services:
|
||||||
|
bifrost:
|
||||||
|
image: docker.io/maximhq/bifrost:latest
|
||||||
|
container_name: bifrost
|
||||||
|
ports:
|
||||||
|
- "8000:8000"
|
||||||
|
volumes:
|
||||||
|
- bifrost-data:/app/data
|
||||||
|
environment:
|
||||||
|
- APP_PORT=8000
|
||||||
|
- APP_HOST=0.0.0.0
|
||||||
|
- LOG_LEVEL=info
|
||||||
|
- LOG_STYLE=json
|
||||||
|
ulimits:
|
||||||
|
nofile:
|
||||||
|
soft: 65536
|
||||||
|
hard: 65536
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--no-verbose",
|
||||||
|
"--tries=1",
|
||||||
|
"-O",
|
||||||
|
"/dev/null",
|
||||||
|
"http://localhost:8080/health",
|
||||||
|
]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
restart: unless-stopped
|
||||||
17
active/container_bifrost/quadlets/bifrost.container
Normal file
17
active/container_bifrost/quadlets/bifrost.container
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=bifrost
|
||||||
|
Environment=APP_PORT=8000 APP_HOST=0.0.0.0 LOG_LEVEL=info LOG_STYLE=json
|
||||||
|
HealthCmd=["wget", "--no-verbose", "--tries=1", "-O", "/dev/null", "http://localhost:8080/health"]
|
||||||
|
HealthInterval=30s
|
||||||
|
HealthRetries=3
|
||||||
|
HealthTimeout=10s
|
||||||
|
Image=docker.io/maximhq/bifrost:latest
|
||||||
|
PublishPort=8000:8000
|
||||||
|
Ulimit=nofile=65536:65536
|
||||||
|
Volume=bifrost-data:/app/data
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
@@ -2,6 +2,14 @@
|
|||||||
|
|
||||||
<https://gitea.baerentsen.space/FrederikBaerentsen/BrickTracker/src/branch/master/docs/quickstart.md>
|
<https://gitea.baerentsen.space/FrederikBaerentsen/BrickTracker/src/branch/master/docs/quickstart.md>
|
||||||
|
|
||||||
|
## Update
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_bricktracker/connorbricktracker-compose.yaml bricktracker:
|
||||||
|
ssh bricktracker
|
||||||
|
docker compose -f connorbricktracker-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
## Setup
|
## Setup
|
||||||
|
|
||||||
### Create the bricktracker user
|
### Create the bricktracker user
|
||||||
@@ -34,7 +42,7 @@ mkdir -p /home/bricktracker/.config/containers/systemd
|
|||||||
bricktracker:
|
bricktracker:
|
||||||
container_name: BrickTracker
|
container_name: BrickTracker
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
image: gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.2.2
|
image: gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.3.1
|
||||||
ports:
|
ports:
|
||||||
- "3333:3333"
|
- "3333:3333"
|
||||||
volumes:
|
volumes:
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
services:
|
||||||
|
bricktracker:
|
||||||
|
container_name: BrickTracker
|
||||||
|
restart: unless-stopped
|
||||||
|
image: gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.3.1
|
||||||
|
ports:
|
||||||
|
- "3333:3333"
|
||||||
|
volumes:
|
||||||
|
- /srv/bricktracker/connorbricktracker/data:/var/lib/bricktracker
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/instructions:/app/static/instructions
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/minifigures:/app/static/minifigures
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/parts:/app/static/parts
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/sets:/app/static/sets
|
||||||
|
env_file: "/srv/bricktracker/connorbricktracker/.env"
|
||||||
|
security_opt:
|
||||||
|
- label=disable
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
FROM docker.io/caddy:2-builder AS builder
|
FROM docker.io/caddy:2-builder AS builder
|
||||||
|
|
||||||
RUN xcaddy build \
|
RUN xcaddy build \
|
||||||
--with github.com/caddy-dns/route53@v1.6.0
|
--with github.com/caddy-dns/route53@v1.6.0 \
|
||||||
|
--with github.com/fabriziosalmi/caddy-waf
|
||||||
|
|
||||||
|
|
||||||
FROM docker.io/caddy:2
|
FROM docker.io/caddy:2
|
||||||
|
|
||||||
210
active/container_caddy/caddy.md
Normal file
210
active/container_caddy/caddy.md
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
# Caddy Reverse Proxy
|
||||||
|
|
||||||
|
- [Caddy Reverse Proxy](#caddy-reverse-proxy)
|
||||||
|
- [Custom Caddy Image](#custom-caddy-image)
|
||||||
|
- [Install Caddy](#install-caddy)
|
||||||
|
- [Ansible](#ansible)
|
||||||
|
- [Manual](#manual)
|
||||||
|
- [Adding a new Caddy Record](#adding-a-new-caddy-record)
|
||||||
|
- [Logs](#logs)
|
||||||
|
- [Caddy WAF](#caddy-waf)
|
||||||
|
|
||||||
|
## Custom Caddy Image
|
||||||
|
|
||||||
|
This repo builds a custom caddy image with route53 DNS certbot support.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman image pull gitea.reeseapps.com/services/caddy:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
To upgrade the image, check [the caddy-dns route53
|
||||||
|
project](https://github.com/caddy-dns/route53/tags) releases and update the
|
||||||
|
`Containerfile` with the new version.
|
||||||
|
|
||||||
|
## Install Caddy
|
||||||
|
|
||||||
|
### Ansible
|
||||||
|
|
||||||
|
You'll need a secrets/Caddyfile with your caddy config.
|
||||||
|
|
||||||
|
`secrets/Caddyfile` example:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
https://something.reeseapps.com:443 {
|
||||||
|
reverse_proxy internal.reeselink.com:8000
|
||||||
|
}
|
||||||
|
|
||||||
|
https://something-else.reeseapps.com:443 {
|
||||||
|
reverse_proxy internal-other.reeselink.com:8080
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to add [your route53 configuration](https://github.com/caddy-dns/route53?tab=readme-ov-file#configuration)
|
||||||
|
|
||||||
|
```conf
|
||||||
|
tls {
|
||||||
|
dns route53 {
|
||||||
|
access_key_id "..."
|
||||||
|
secret_access_key "..."
|
||||||
|
region "us-east-1"
|
||||||
|
wait_for_route53_sync true
|
||||||
|
skip_route53_sync_on_delete true
|
||||||
|
route53_max_wait 2m
|
||||||
|
max_retries 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The playbook limits the installer to `hosts: caddy` so make sure you have a caddy
|
||||||
|
host in your inventory.
|
||||||
|
|
||||||
|
Now you can install the Caddy service with something like:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Base Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_proxy.yaml
|
||||||
|
|
||||||
|
# Deskwork (AI) Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_deskwork.yaml
|
||||||
|
|
||||||
|
# Toybox (AI) Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_toybox.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
See ansible playbook [install_caddy.yaml](/active/container_caddy/install_caddy.yaml)
|
||||||
|
|
||||||
|
### Manual
|
||||||
|
|
||||||
|
As root
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir /etc/caddy
|
||||||
|
vim /etc/caddy/Caddyfile
|
||||||
|
```
|
||||||
|
|
||||||
|
Caddy will automatically provision certificates if the server DNS points to the correct IP
|
||||||
|
and is accessible on the ports specifified. All you need to do is put `https` in the caddy conf.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
# Gitea
|
||||||
|
https://gitea.reeseapps.com:443 {
|
||||||
|
reverse_proxy podman.reeselink.com:3000
|
||||||
|
}
|
||||||
|
|
||||||
|
# Jellyfin
|
||||||
|
https://jellyfin.reeseapps.com:443 {
|
||||||
|
reverse_proxy podman.reeselink.com:8096
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim /etc/containers/systemd/caddy.container
|
||||||
|
```
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[Unit]
|
||||||
|
Description=Caddy
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
AddCapability=NET_ADMIN
|
||||||
|
ContainerName=caddy
|
||||||
|
Image=docker.io/caddy:2
|
||||||
|
Network=host
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/etc/caddy:/etc/caddy
|
||||||
|
Volume=caddy_data:/data
|
||||||
|
Volume=caddy_config:/config
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart caddy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Adding a new Caddy Record
|
||||||
|
|
||||||
|
Before you can create a Caddyfile you need records that point to your server.
|
||||||
|
|
||||||
|
You can either create them manually in your DNS provider of choice or use the provided
|
||||||
|
ddns service:
|
||||||
|
|
||||||
|
1. Update the [ddns caddy records](/active/container_ddns/secrets/caddy_records.yaml)
|
||||||
|
2. (Optional) Update the Caddyfile at `active/container_caddy/secrets/Caddyfile`
|
||||||
|
3. Run the [caddy ansible playbook](/active/container_caddy/caddy.md#install-caddy)
|
||||||
|
|
||||||
|
## Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Follow remote connections
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host}'
|
||||||
|
|
||||||
|
# Filter out noisy hosts
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host} | select(.host != "gitea.reeseapps.com")'
|
||||||
|
|
||||||
|
# Focus on user agents
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '
|
||||||
|
{
|
||||||
|
"User-Agent": .request.headers["User-Agent"],
|
||||||
|
remote_ip: .request.remote_ip,
|
||||||
|
host: .request.host,
|
||||||
|
status: .status
|
||||||
|
}
|
||||||
|
'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Caddy WAF
|
||||||
|
|
||||||
|
<https://github.com/fabriziosalmi/caddy-waf>
|
||||||
|
|
||||||
|
1. Copy the rules.json to `/etc/caddy/rules.json`
|
||||||
|
2. Update the Caddyfile to something like this:
|
||||||
|
|
||||||
|
```Caddyfile
|
||||||
|
gitea.reeseapps.com:443 {
|
||||||
|
log {
|
||||||
|
output stdout
|
||||||
|
format json {
|
||||||
|
message_key msg # Key for the log message
|
||||||
|
level_key severity # Key for the log level
|
||||||
|
time_key timestamp # Key for the timestamp
|
||||||
|
name_key logger # Key for the logger name
|
||||||
|
caller_key function # Key for the caller information
|
||||||
|
stacktrace_key stack # Key for error stacktraces
|
||||||
|
time_format "2006-01-02 15:04:05 MST" # RFC3339-like format
|
||||||
|
time_local # Use local timezone
|
||||||
|
duration_format "ms" # Show durations in milliseconds
|
||||||
|
level_format "upper" # Uppercase log levels
|
||||||
|
}
|
||||||
|
}
|
||||||
|
route {
|
||||||
|
waf {
|
||||||
|
metrics_endpoint /waf_metrics
|
||||||
|
rule_file rules.json
|
||||||
|
}
|
||||||
|
|
||||||
|
@wafmetrics {
|
||||||
|
path /waf_metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
handle @wafmetrics { } # empty → let the WAF serve the metrics
|
||||||
|
|
||||||
|
handle {
|
||||||
|
reverse_proxy gitea.reeselink.com:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
- name: Create DDNS Service
|
- name: Create Caddy Proxy
|
||||||
hosts: caddy
|
hosts: deskwork-root
|
||||||
tasks:
|
tasks:
|
||||||
- name: Create /etc/caddy dir
|
- name: Create /etc/caddy dir
|
||||||
ansible.builtin.file:
|
ansible.builtin.file:
|
||||||
@@ -8,7 +8,7 @@
|
|||||||
mode: '0755'
|
mode: '0755'
|
||||||
- name: Copy Caddyfile
|
- name: Copy Caddyfile
|
||||||
template:
|
template:
|
||||||
src: secrets/Caddyfile
|
src: secrets/deskwork.Caddyfile
|
||||||
dest: /etc/caddy/Caddyfile
|
dest: /etc/caddy/Caddyfile
|
||||||
owner: root
|
owner: root
|
||||||
group: root
|
group: root
|
||||||
45
active/container_caddy/install_caddy_proxy.yaml
Normal file
45
active/container_caddy/install_caddy_proxy.yaml
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
- name: Create Caddy Proxy
|
||||||
|
hosts: caddy
|
||||||
|
tasks:
|
||||||
|
- name: Copy Containerfile for build
|
||||||
|
template:
|
||||||
|
src: Containerfile
|
||||||
|
dest: /etc/caddy/Containerfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Build Caddy Image
|
||||||
|
shell:
|
||||||
|
cmd: podman build -t gitea.reeseapps.com/services/caddy:latest -f /etc/caddy/Containerfile
|
||||||
|
- name: Create /etc/caddy dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/caddy
|
||||||
|
state: directory
|
||||||
|
mode: "0755"
|
||||||
|
- name: Copy Caddyfile
|
||||||
|
template:
|
||||||
|
src: secrets/proxy.Caddyfile
|
||||||
|
dest: /etc/caddy/Caddyfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Copy rules.json
|
||||||
|
template:
|
||||||
|
src: rules.json
|
||||||
|
dest: /etc/caddy/rules.json
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Template Caddy Container Services
|
||||||
|
template:
|
||||||
|
src: caddy.container
|
||||||
|
dest: /etc/containers/systemd/caddy.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Reload and start the Caddy service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: caddy.service
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
28
active/container_caddy/install_caddy_toybox.yaml
Normal file
28
active/container_caddy/install_caddy_toybox.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
- name: Create Caddy Proxy
|
||||||
|
hosts: toybox-root
|
||||||
|
tasks:
|
||||||
|
- name: Create /etc/caddy dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/caddy
|
||||||
|
state: directory
|
||||||
|
mode: "0755"
|
||||||
|
- name: Copy Caddyfile
|
||||||
|
template:
|
||||||
|
src: secrets/toybox.Caddyfile
|
||||||
|
dest: /etc/caddy/Caddyfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Template Caddy Container Services
|
||||||
|
template:
|
||||||
|
src: caddy.container
|
||||||
|
dest: /etc/containers/systemd/caddy.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Reload and start the Caddy service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: caddy.service
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
26
active/container_caddy/rules.json
Normal file
26
active/container_caddy/rules.json
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "block-scanners",
|
||||||
|
"phase": 1,
|
||||||
|
"pattern": "(?i)(nikto|sqlmap|nmap|acunetix|nessus|openvas|wpscan|dirbuster|burpsuite|owasp zap|netsparker|appscan|arachni|skipfish|gobuster|wfuzz|hydra|metasploit|nessus|openvas|qualys|zap|w3af|openwebspider|netsparker|appspider|rapid7|nessus|qualys|nuclei|zgrab|vega|gospider|gxspider|whatweb|xspider|joomscan|uniscan|blindelephant)",
|
||||||
|
"targets": [
|
||||||
|
"HEADERS:User-Agent"
|
||||||
|
],
|
||||||
|
"severity": "CRITICAL",
|
||||||
|
"action": "block",
|
||||||
|
"score": 10,
|
||||||
|
"description": "Block traffic from known vulnerability scanners and penetration testing tools. Includes more scanners."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "block-crawlers",
|
||||||
|
"phase": 1,
|
||||||
|
"pattern": "(meta-externalagent)",
|
||||||
|
"targets": [
|
||||||
|
"HEADERS:User-Agent"
|
||||||
|
],
|
||||||
|
"severity": "CRITICAL",
|
||||||
|
"action": "block",
|
||||||
|
"score": 10,
|
||||||
|
"description": "Block traffic from web scrapers and crawlers."
|
||||||
|
}
|
||||||
|
]
|
||||||
173
active/container_certbot/certbot.md
Normal file
173
active/container_certbot/certbot.md
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
# Podman certbot
|
||||||
|
|
||||||
|
- [Podman certbot](#podman-certbot)
|
||||||
|
- [Setup certbot Project](#setup-certbot-project)
|
||||||
|
- [Install certbot](#install-certbot)
|
||||||
|
- [Create the certbot user](#create-the-certbot-user)
|
||||||
|
- [Write the certbot compose spec](#write-the-certbot-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert certbot compose spec to quadlets](#convert-certbot-compose-spec-to-quadlets)
|
||||||
|
- [Create any container-mounted directories](#create-any-container-mounted-directories)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose certbot](#expose-certbot)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup certbot](#backup-certbot)
|
||||||
|
- [Upgrade certbot](#upgrade-certbot)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup certbot Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_certbot
|
||||||
|
- [ ] Find and replace certbot with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install certbot
|
||||||
|
|
||||||
|
### Create the certbot user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd certbot
|
||||||
|
loginctl enable-linger $(id -u certbot)
|
||||||
|
systemctl --user --machine=certbot@.host enable podman-restart
|
||||||
|
systemctl --user --machine=certbot@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/certbot/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the certbot compose spec
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run -it --rm --name certbot \
|
||||||
|
-v "/etc/letsencrypt:/etc/letsencrypt:Z" \
|
||||||
|
-v "/var/lib/letsencrypt:/var/lib/letsencrypt:Z" \
|
||||||
|
certbot/certbot certonly -d keycloak.reeseapps.com -d keycloak.reeselink.com
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/certbot/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert certbot compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_certbot/:/compose \
|
||||||
|
-v $(pwd)/active/container_certbot/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r active/container_certbot/quadlets/. $PODMAN_SERVER:/home/certbot/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R certbot:certbot /home/certbot/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create any container-mounted directories
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell certbot@
|
||||||
|
podman unshare
|
||||||
|
mkdir some_volume
|
||||||
|
# Chown to the namespaced user with UID 1000
|
||||||
|
# This will be some really obscure UID outside the namespace
|
||||||
|
# This will also solve most permission denied errors
|
||||||
|
chown -R 1000:1000 some_volume
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell certbot@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart certbot
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose certbot
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup certbot
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade certbot
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-certbot-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER$:/home/certbot/.config/containers/systemd/
|
||||||
|
ssh certbot systemctl --user daemon-reload
|
||||||
|
ssh certbot systemctl --user restart certbot
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user certbot is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel certbot
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
10
active/container_certbot/quadlets/certbot.service
Normal file
10
active/container_certbot/quadlets/certbot.service
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Runs certbot renew
|
||||||
|
After=syslog.target network.target auditd.service
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
ExecStart=/usr/bin/command -with -arguments
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
11
active/container_certbot/quadlets/certbot.timer
Normal file
11
active/container_certbot/quadlets/certbot.timer
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Daily certbot certificate renewal
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
OnCalendar=daily
|
||||||
|
AccuracySec=12h
|
||||||
|
Persistent=true
|
||||||
|
Unit=certbot.service
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
||||||
1
active/container_ddns/.python-version
Normal file
1
active/container_ddns/.python-version
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3.13
|
||||||
@@ -15,6 +15,9 @@ ENV PATH="/root/.local/bin/:$PATH"
|
|||||||
# Copy the project into the image
|
# Copy the project into the image
|
||||||
COPY update.py uv.lock pyproject.toml /app/
|
COPY update.py uv.lock pyproject.toml /app/
|
||||||
|
|
||||||
|
# Copy the records file
|
||||||
|
COPY records.yaml /etc/ddns/records.yaml
|
||||||
|
|
||||||
# Sync the project into a new environment, using the frozen lockfile
|
# Sync the project into a new environment, using the frozen lockfile
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
RUN uv sync --frozen
|
RUN uv sync --frozen
|
||||||
@@ -4,15 +4,14 @@ After=network-online.target
|
|||||||
Wants=network-online.target
|
Wants=network-online.target
|
||||||
|
|
||||||
[Container]
|
[Container]
|
||||||
Environment=ROUTE53_RECORD={{ item.record }}
|
Environment=ROUTE53_RECORDS_FILE=/etc/ddns/records.yaml
|
||||||
Environment=HOSTED_ZONE_ID={{ item.hosted_zone_id }}
|
|
||||||
Environment=AWS_ACCESS_KEY_ID={{ aws.access_key_id }}
|
Environment=AWS_ACCESS_KEY_ID={{ aws.access_key_id }}
|
||||||
Environment=AWS_SECRET_ACCESS_KEY={{ aws.secret_access_key }}
|
Environment=AWS_SECRET_ACCESS_KEY={{ aws.secret_access_key }}
|
||||||
{% if item.skip_ipv6 | default(false) %}
|
{% if item.skip_ipv6 | default(false) %}
|
||||||
Environment=SKIP_IPV6=true
|
Environment=GLOBAL_SKIP_IPV6=true
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if item.skip_ipv4 | default(false) %}
|
{% if item.skip_ipv4 | default(false) %}
|
||||||
Environment=SKIP_IPV4=true
|
Environment=GLOBAL_SKIP_IPV4=true
|
||||||
{% endif %}
|
{% endif %}
|
||||||
Image=gitea.reeseapps.com/services/ddns:latest
|
Image=gitea.reeseapps.com/services/ddns:latest
|
||||||
Network=ddns.network
|
Network=ddns.network
|
||||||
@@ -3,9 +3,7 @@
|
|||||||
- [DDNS for Route53](#ddns-for-route53)
|
- [DDNS for Route53](#ddns-for-route53)
|
||||||
- [Quickly Update DDNS Records](#quickly-update-ddns-records)
|
- [Quickly Update DDNS Records](#quickly-update-ddns-records)
|
||||||
- [Install a New DDNS Service](#install-a-new-ddns-service)
|
- [Install a New DDNS Service](#install-a-new-ddns-service)
|
||||||
- [Ansible 3D Server Records](#ansible-3d-server-records)
|
- [Ansible Caddy Records](#ansible-caddy-records)
|
||||||
- [Ansible Unifi External Records](#ansible-unifi-external-records)
|
|
||||||
- [Ansible Hostname reeselink records](#ansible-hostname-reeselink-records)
|
|
||||||
- [Development](#development)
|
- [Development](#development)
|
||||||
- [Testing](#testing)
|
- [Testing](#testing)
|
||||||
- [Building Container Image](#building-container-image)
|
- [Building Container Image](#building-container-image)
|
||||||
@@ -59,13 +57,9 @@ Now you can install the DDNS service with something like:
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
ansible-playbook \
|
ansible-playbook \
|
||||||
# specify your inventory
|
|
||||||
-i ansible/inventory.yaml \
|
-i ansible/inventory.yaml \
|
||||||
# -l limits to a particular host
|
-l proxy \
|
||||||
-l 3dserver \
|
active/container_ddns/install_ddns.yaml
|
||||||
active/podman_ddns/install_ddns.yaml \
|
|
||||||
# -e brings in our secrets/records.yaml
|
|
||||||
-e "@active/podman_ddns/secrets/records.yaml"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
See ansible playbook [install_ddns.yaml](/install_ddns.yaml)
|
See ansible playbook [install_ddns.yaml](/install_ddns.yaml)
|
||||||
@@ -75,34 +69,14 @@ multiple servers. If you have a podman server, it'll have its own
|
|||||||
`podman-records.yaml`. If you have a docker server, it'll have its own
|
`podman-records.yaml`. If you have a docker server, it'll have its own
|
||||||
`docker-records.yaml`. Etc. etc.
|
`docker-records.yaml`. Etc. etc.
|
||||||
|
|
||||||
### Ansible 3D Server Records
|
### Ansible Caddy Records
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
ansible-playbook \
|
ansible-playbook \
|
||||||
-i ansible/inventory.yaml \
|
-i ansible/inventory.yaml \
|
||||||
-l 3dserver \
|
-l caddy \
|
||||||
active/podman_ddns/install_ddns.yaml \
|
active/container_ddns/install_ddns.yaml \
|
||||||
-e "@active/podman_ddns/secrets/3dserver_records.yaml"
|
-e "@active/container_ddns/secrets/records.yaml"
|
||||||
```
|
|
||||||
|
|
||||||
### Ansible Unifi External Records
|
|
||||||
|
|
||||||
```bash
|
|
||||||
ansible-playbook \
|
|
||||||
-i ansible/inventory.yaml \
|
|
||||||
-l unifi-external \
|
|
||||||
active/podman_ddns/install_ddns.yaml \
|
|
||||||
-e "@active/podman_ddns/secrets/unifi_external_record.yaml"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Ansible Hostname reeselink records
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export PLAYBOOK_PATH=active/podman_ddns
|
|
||||||
ansible-playbook \
|
|
||||||
-i ansible/inventory.yaml \
|
|
||||||
${PLAYBOOK_PATH}/install_ddns.yaml \
|
|
||||||
-e "@${PLAYBOOK_PATH}/secrets/hostname_reeselink_record.yaml"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Development
|
## Development
|
||||||
@@ -1,11 +1,11 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Run ddns.{{ item.record }}.service every hour
|
Description=Run ddns.service every hour
|
||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=hourly
|
OnCalendar=hourly
|
||||||
AccuracySec=10min
|
AccuracySec=10min
|
||||||
Persistent=true
|
Persistent=true
|
||||||
Unit=ddns.{{ item.record }}.service
|
Unit=ddns.service
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=timers.target
|
WantedBy=timers.target
|
||||||
59
active/container_ddns/install_ddns.yaml
Normal file
59
active/container_ddns/install_ddns.yaml
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
- name: Create DDNS Service
|
||||||
|
hosts: all
|
||||||
|
vars_files:
|
||||||
|
- secrets/vars.yaml
|
||||||
|
tasks:
|
||||||
|
- name: Create container build dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /tmp/ddns
|
||||||
|
state: directory
|
||||||
|
mode: '0755'
|
||||||
|
- name: Copy container build files
|
||||||
|
copy:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: /tmp/ddns/
|
||||||
|
with_items:
|
||||||
|
- uv.lock
|
||||||
|
- pyproject.toml
|
||||||
|
- update.py
|
||||||
|
- Containerfile
|
||||||
|
- secrets/records.yaml
|
||||||
|
- name: Run container build
|
||||||
|
shell:
|
||||||
|
cmd: podman build -t gitea.reeseapps.com/services/ddns:latest -f ./Containerfile
|
||||||
|
chdir: /tmp/ddns/
|
||||||
|
- name: Remove container build dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /tmp/ddns
|
||||||
|
state: absent
|
||||||
|
- name: Copy ddns.network
|
||||||
|
template:
|
||||||
|
src: ddns.network
|
||||||
|
dest: /etc/containers/systemd/ddns.network
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Template DDNS Container Service
|
||||||
|
template:
|
||||||
|
src: ddns.container
|
||||||
|
dest: /etc/containers/systemd/ddns.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Template DDNS Container Timer
|
||||||
|
template:
|
||||||
|
src: ddns.timer
|
||||||
|
dest: /etc/systemd/system/ddns.timer
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Reload ddns timer
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: ddns.timer
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
|
- name: Run ddns service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: ddns.service
|
||||||
@@ -8,4 +8,6 @@ dependencies = [
|
|||||||
"boto3>=1.37.30",
|
"boto3>=1.37.30",
|
||||||
"boto3-stubs[all]>=1.38.23",
|
"boto3-stubs[all]>=1.38.23",
|
||||||
"pytest>=8.3.5",
|
"pytest>=8.3.5",
|
||||||
|
"pyyaml>=6.0.3",
|
||||||
|
"types-pyyaml>=6.0.12.20250915",
|
||||||
]
|
]
|
||||||
@@ -6,10 +6,17 @@ export ROUTE53_RECORD=something.mydomain.com
|
|||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
from typing import TYPE_CHECKING
|
import yaml
|
||||||
|
import sys
|
||||||
|
from typing import TYPE_CHECKING, TypedDict
|
||||||
|
|
||||||
import boto3
|
import boto3
|
||||||
|
|
||||||
|
try:
|
||||||
|
from yaml import CLoader as Loader
|
||||||
|
except ImportError:
|
||||||
|
from yaml import Loader # type: ignore
|
||||||
|
|
||||||
if TYPE_CHECKING:
|
if TYPE_CHECKING:
|
||||||
from mypy_boto3_route53 import Route53Client
|
from mypy_boto3_route53 import Route53Client
|
||||||
|
|
||||||
@@ -21,10 +28,20 @@ logging.basicConfig(
|
|||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
logger.setLevel(logging.INFO)
|
logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
HOSTED_ZONE_ID = os.getenv("HOSTED_ZONE_ID")
|
ROUTE53_RECORDS_FILE = os.getenv("ROUTE53_RECORDS_FILE")
|
||||||
ROUTE53_RECORD = os.getenv("ROUTE53_RECORD")
|
GLOBAL_SKIP_IPV4 = os.getenv("GLOBAL_SKIP_IPV4", "false").lower() == "true"
|
||||||
SKIP_IPV4 = os.getenv("SKIP_IPV4", "false").lower() == "true"
|
GLOBAL_SKIP_IPV6 = os.getenv("GLOBAL_SKIP_IPV6", "false").lower() == "true"
|
||||||
SKIP_IPV6 = os.getenv("SKIP_IPV6", "false").lower() == "true"
|
|
||||||
|
|
||||||
|
class RecordType(TypedDict):
|
||||||
|
record: str
|
||||||
|
hosted_zone_id: str
|
||||||
|
skip_ipv4: bool | None
|
||||||
|
skip_ipv6: bool | None
|
||||||
|
|
||||||
|
|
||||||
|
class RecordYamlStruct(TypedDict):
|
||||||
|
records: list[RecordType]
|
||||||
|
|
||||||
|
|
||||||
def get_ipv4() -> str:
|
def get_ipv4() -> str:
|
||||||
@@ -92,18 +109,20 @@ def update_ipv6(hosted_zone_id: str, record: str, public_ipv6: str):
|
|||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
if not HOSTED_ZONE_ID:
|
if not ROUTE53_RECORDS_FILE:
|
||||||
logger.error("HOSTED_ZONE_ID env var not found!")
|
logger.error("ROUTE53_RECORDS_FILE env var not found!")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if not ROUTE53_RECORD:
|
try:
|
||||||
logger.error("ROUTE53_RECORD env var not found!")
|
with open(ROUTE53_RECORDS_FILE) as f:
|
||||||
exit(1)
|
records_file_contents: RecordYamlStruct = yaml.load(f, Loader)
|
||||||
|
except FileNotFoundError as e:
|
||||||
|
logger.error(e)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
logger.info(f"Attempting to update {ROUTE53_RECORD} from {HOSTED_ZONE_ID}.")
|
if GLOBAL_SKIP_IPV4:
|
||||||
|
public_ipv4 = None
|
||||||
if SKIP_IPV4:
|
logger.warning("Globally skipping IPv4.")
|
||||||
logger.warning("Skipping IPv4.")
|
|
||||||
else:
|
else:
|
||||||
logger.info("Getting IPv4 address from ifconfig.me")
|
logger.info("Getting IPv4 address from ifconfig.me")
|
||||||
public_ipv4 = get_ipv4()
|
public_ipv4 = get_ipv4()
|
||||||
@@ -111,14 +130,10 @@ def main():
|
|||||||
logger.error("Public IPv4 not found.")
|
logger.error("Public IPv4 not found.")
|
||||||
exit(1)
|
exit(1)
|
||||||
logger.info(f"Public IPv4 is {public_ipv4}")
|
logger.info(f"Public IPv4 is {public_ipv4}")
|
||||||
update_ipv4(
|
|
||||||
hosted_zone_id=HOSTED_ZONE_ID,
|
|
||||||
record=ROUTE53_RECORD,
|
|
||||||
public_ipv4=public_ipv4,
|
|
||||||
)
|
|
||||||
|
|
||||||
if SKIP_IPV6:
|
if GLOBAL_SKIP_IPV6:
|
||||||
logger.warning("Skipping IPv6")
|
public_ipv6 = None
|
||||||
|
logger.warning("Globally Skipping IPv6")
|
||||||
else:
|
else:
|
||||||
logger.info("Getting IPv6 address from ifconfig.me")
|
logger.info("Getting IPv6 address from ifconfig.me")
|
||||||
public_ipv6 = get_ipv6()
|
public_ipv6 = get_ipv6()
|
||||||
@@ -126,11 +141,32 @@ def main():
|
|||||||
logger.error("Public IPv6 not found.")
|
logger.error("Public IPv6 not found.")
|
||||||
exit(1)
|
exit(1)
|
||||||
logger.info(f"Public IPv6 is {public_ipv6}")
|
logger.info(f"Public IPv6 is {public_ipv6}")
|
||||||
update_ipv6(
|
|
||||||
hosted_zone_id=HOSTED_ZONE_ID,
|
for record in records_file_contents["records"]:
|
||||||
record=ROUTE53_RECORD,
|
|
||||||
public_ipv6=public_ipv6,
|
logger.info(f"Attempting to update {record['record']} from {record['hosted_zone_id']}.")
|
||||||
)
|
|
||||||
|
if record.get("skip_ipv4"):
|
||||||
|
logger.info(f"{record['record']} requested to skip IPv4")
|
||||||
|
elif GLOBAL_SKIP_IPV4 or not public_ipv4:
|
||||||
|
logger.info("Globally skipping IPv4")
|
||||||
|
else:
|
||||||
|
update_ipv4(
|
||||||
|
hosted_zone_id=record["hosted_zone_id"],
|
||||||
|
record=record["record"],
|
||||||
|
public_ipv4=public_ipv4,
|
||||||
|
)
|
||||||
|
|
||||||
|
if record.get("skip_ipv6"):
|
||||||
|
logger.info(f"{record['record']} requested to skip IPv6")
|
||||||
|
elif GLOBAL_SKIP_IPV6 or not public_ipv6:
|
||||||
|
logger.info("Globally skipping IPv6")
|
||||||
|
else:
|
||||||
|
update_ipv6(
|
||||||
|
hosted_zone_id=record["hosted_zone_id"],
|
||||||
|
record=record["record"],
|
||||||
|
public_ipv6=public_ipv6,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
@@ -480,6 +480,8 @@ dependencies = [
|
|||||||
{ name = "boto3" },
|
{ name = "boto3" },
|
||||||
{ name = "boto3-stubs", extra = ["all"] },
|
{ name = "boto3-stubs", extra = ["all"] },
|
||||||
{ name = "pytest" },
|
{ name = "pytest" },
|
||||||
|
{ name = "pyyaml" },
|
||||||
|
{ name = "types-pyyaml" },
|
||||||
]
|
]
|
||||||
|
|
||||||
[package.metadata]
|
[package.metadata]
|
||||||
@@ -487,6 +489,8 @@ requires-dist = [
|
|||||||
{ name = "boto3", specifier = ">=1.37.30" },
|
{ name = "boto3", specifier = ">=1.37.30" },
|
||||||
{ name = "boto3-stubs", extras = ["all"], specifier = ">=1.38.23" },
|
{ name = "boto3-stubs", extras = ["all"], specifier = ">=1.38.23" },
|
||||||
{ name = "pytest", specifier = ">=8.3.5" },
|
{ name = "pytest", specifier = ">=8.3.5" },
|
||||||
|
{ name = "pyyaml", specifier = ">=6.0.3" },
|
||||||
|
{ name = "types-pyyaml", specifier = ">=6.0.12.20250915" },
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -5399,6 +5403,70 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
|
{ url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "pyyaml"
|
||||||
|
version = "6.0.3"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = "2025-09-25T21:31:47.706Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "s3transfer"
|
name = "s3transfer"
|
||||||
version = "0.11.4"
|
version = "0.11.4"
|
||||||
@@ -5468,6 +5536,15 @@ wheels = [
|
|||||||
{ url = "https://files.pythonhosted.org/packages/4c/82/1ee2e5c9d28deac086ab3a6ff07c8bc393ef013a083f546c623699881715/types_awscrt-0.27.2-py3-none-any.whl", hash = "sha256:49a045f25bbd5ad2865f314512afced933aed35ddbafc252e2268efa8a787e4e", size = 37761, upload-time = "2025-05-16T03:10:07.466Z" },
|
{ url = "https://files.pythonhosted.org/packages/4c/82/1ee2e5c9d28deac086ab3a6ff07c8bc393ef013a083f546c623699881715/types_awscrt-0.27.2-py3-none-any.whl", hash = "sha256:49a045f25bbd5ad2865f314512afced933aed35ddbafc252e2268efa8a787e4e", size = 37761, upload-time = "2025-05-16T03:10:07.466Z" },
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "types-pyyaml"
|
||||||
|
version = "6.0.12.20250915"
|
||||||
|
source = { registry = "https://pypi.org/simple" }
|
||||||
|
sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" }
|
||||||
|
wheels = [
|
||||||
|
{ url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" },
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "types-s3transfer"
|
name = "types-s3transfer"
|
||||||
version = "0.12.0"
|
version = "0.12.0"
|
||||||
41
active/container_elk/.env
Normal file
41
active/container_elk/.env
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Project namespace (defaults to the current folder name if not set)
|
||||||
|
#COMPOSE_PROJECT_NAME=myproject
|
||||||
|
|
||||||
|
|
||||||
|
# Password for the 'elastic' user (at least 6 characters)
|
||||||
|
ELASTIC_PASSWORD=changeme
|
||||||
|
|
||||||
|
|
||||||
|
# Password for the 'kibana_system' user (at least 6 characters)
|
||||||
|
KIBANA_PASSWORD=changeme
|
||||||
|
|
||||||
|
|
||||||
|
# Version of Elastic products
|
||||||
|
STACK_VERSION=8.7.1
|
||||||
|
|
||||||
|
|
||||||
|
# Set the cluster name
|
||||||
|
CLUSTER_NAME=docker-cluster
|
||||||
|
|
||||||
|
|
||||||
|
# Set to 'basic' or 'trial' to automatically start the 30-day trial
|
||||||
|
LICENSE=basic
|
||||||
|
#LICENSE=trial
|
||||||
|
|
||||||
|
|
||||||
|
# Port to expose Elasticsearch HTTP API to the host
|
||||||
|
ES_PORT=9200
|
||||||
|
|
||||||
|
|
||||||
|
# Port to expose Kibana to the host
|
||||||
|
KIBANA_PORT=5601
|
||||||
|
|
||||||
|
|
||||||
|
# Increase or decrease based on the available host memory (in bytes)
|
||||||
|
ES_MEM_LIMIT=1073741824
|
||||||
|
KB_MEM_LIMIT=1073741824
|
||||||
|
LS_MEM_LIMIT=1073741824
|
||||||
|
|
||||||
|
|
||||||
|
# SAMPLE Predefined Key only to be used in POC environments
|
||||||
|
ENCRYPTION_KEY=c34d38b3a14956121ff2170e5030b471551370178f43e5626eec58b04a30fae2
|
||||||
219
active/container_elk/elk-compose.yaml
Normal file
219
active/container_elk/elk-compose.yaml
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
version: "3.8"
|
||||||
|
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
certs:
|
||||||
|
driver: local
|
||||||
|
esdata01:
|
||||||
|
driver: local
|
||||||
|
kibanadata:
|
||||||
|
driver: local
|
||||||
|
metricbeatdata01:
|
||||||
|
driver: local
|
||||||
|
filebeatdata01:
|
||||||
|
driver: local
|
||||||
|
logstashdata01:
|
||||||
|
driver: local
|
||||||
|
|
||||||
|
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
name: elastic
|
||||||
|
external: false
|
||||||
|
|
||||||
|
|
||||||
|
services:
|
||||||
|
setup:
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/elasticsearch/config/certs
|
||||||
|
user: "0"
|
||||||
|
command: >
|
||||||
|
bash -c '
|
||||||
|
if [ x${ELASTIC_PASSWORD} == x ]; then
|
||||||
|
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
|
||||||
|
exit 1;
|
||||||
|
elif [ x${KIBANA_PASSWORD} == x ]; then
|
||||||
|
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
|
||||||
|
exit 1;
|
||||||
|
fi;
|
||||||
|
if [ ! -f config/certs/ca.zip ]; then
|
||||||
|
echo "Creating CA";
|
||||||
|
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
|
||||||
|
unzip config/certs/ca.zip -d config/certs;
|
||||||
|
fi;
|
||||||
|
if [ ! -f config/certs/certs.zip ]; then
|
||||||
|
echo "Creating certs";
|
||||||
|
echo -ne \
|
||||||
|
"instances:\n"\
|
||||||
|
" - name: es01\n"\
|
||||||
|
" dns:\n"\
|
||||||
|
" - es01\n"\
|
||||||
|
" - localhost\n"\
|
||||||
|
" ip:\n"\
|
||||||
|
" - 127.0.0.1\n"\
|
||||||
|
" - name: kibana\n"\
|
||||||
|
" dns:\n"\
|
||||||
|
" - kibana\n"\
|
||||||
|
" - localhost\n"\
|
||||||
|
" ip:\n"\
|
||||||
|
" - 127.0.0.1\n"\
|
||||||
|
> config/certs/instances.yml;
|
||||||
|
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
|
||||||
|
unzip config/certs/certs.zip -d config/certs;
|
||||||
|
fi;
|
||||||
|
echo "Setting file permissions"
|
||||||
|
chown -R root:root config/certs;
|
||||||
|
find . -type d -exec chmod 750 \{\} \;;
|
||||||
|
find . -type f -exec chmod 640 \{\} \;;
|
||||||
|
echo "Waiting for Elasticsearch availability";
|
||||||
|
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
|
||||||
|
echo "Setting kibana_system password";
|
||||||
|
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
|
||||||
|
echo "All done!";
|
||||||
|
'
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
|
||||||
|
interval: 1s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
es01:
|
||||||
|
depends_on:
|
||||||
|
setup:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: elasticsearch
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/elasticsearch/config/certs
|
||||||
|
- esdata01:/usr/share/elasticsearch/data
|
||||||
|
ports:
|
||||||
|
- ${ES_PORT}:9200
|
||||||
|
environment:
|
||||||
|
- node.name=es01
|
||||||
|
- cluster.name=${CLUSTER_NAME}
|
||||||
|
- discovery.type=single-node
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- bootstrap.memory_lock=true
|
||||||
|
- xpack.security.enabled=true
|
||||||
|
- xpack.security.http.ssl.enabled=true
|
||||||
|
- xpack.security.http.ssl.key=certs/es01/es01.key
|
||||||
|
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
|
||||||
|
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
|
||||||
|
- xpack.security.transport.ssl.enabled=true
|
||||||
|
- xpack.security.transport.ssl.key=certs/es01/es01.key
|
||||||
|
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
|
||||||
|
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
|
||||||
|
- xpack.security.transport.ssl.verification_mode=certificate
|
||||||
|
- xpack.license.self_generated.type=${LICENSE}
|
||||||
|
mem_limit: ${ES_MEM_LIMIT}
|
||||||
|
ulimits:
|
||||||
|
memlock:
|
||||||
|
soft: -1
|
||||||
|
hard: -1
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD-SHELL",
|
||||||
|
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
|
||||||
|
]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
kibana:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: kibana
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/kibana/config/certs
|
||||||
|
- kibanadata:/usr/share/kibana/data
|
||||||
|
ports:
|
||||||
|
- ${KIBANA_PORT}:5601
|
||||||
|
environment:
|
||||||
|
- SERVERNAME=kibana
|
||||||
|
- ELASTICSEARCH_HOSTS=https://es01:9200
|
||||||
|
- ELASTICSEARCH_USERNAME=kibana_system
|
||||||
|
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
|
||||||
|
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
|
||||||
|
- XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
- XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
- XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
mem_limit: ${KB_MEM_LIMIT}
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD-SHELL",
|
||||||
|
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
|
||||||
|
]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
metricbeat01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
kibana:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/beats/metricbeat:${STACK_VERSION}
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/metricbeat/certs
|
||||||
|
- metricbeatdata01:/usr/share/metricbeat/data
|
||||||
|
- "./metricbeat.yaml:/usr/share/metricbeat/metricbeat.yml:ro"
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
- "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro"
|
||||||
|
- "/proc:/hostfs/proc:ro"
|
||||||
|
- "/:/hostfs:ro"
|
||||||
|
environment:
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
|
- KIBANA_HOSTS=http://kibana:5601
|
||||||
|
- LOGSTASH_HOSTS=http://logstash01:9600
|
||||||
|
|
||||||
|
filebeat01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/beats/filebeat:${STACK_VERSION}
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/filebeat/certs
|
||||||
|
- filebeatdata01:/usr/share/filebeat/data
|
||||||
|
- "./filebeat_ingest_data/:/usr/share/filebeat/ingest_data/"
|
||||||
|
- "./filebeat.yaml:/usr/share/filebeat/filebeat.yml:ro"
|
||||||
|
- "/var/lib/docker/containers:/var/lib/docker/containers:ro"
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
environment:
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
|
- KIBANA_HOSTS=http://kibana:5601
|
||||||
|
- LOGSTASH_HOSTS=http://logstash01:9600
|
||||||
|
|
||||||
|
logstash01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
kibana:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/logstash/logstash:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: logstash
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/logstash/certs
|
||||||
|
- logstashdata01:/usr/share/logstash/data
|
||||||
|
- "./logstash_ingest_data/:/usr/share/logstash/ingest_data/"
|
||||||
|
- "./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro"
|
||||||
|
environment:
|
||||||
|
- xpack.monitoring.enabled=false
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
14
active/container_elk/elk.md
Normal file
14
active/container_elk/elk.md
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Elk Stack
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
<https://www.elastic.co/blog/getting-started-with-the-elastic-stack-and-docker-compose>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy over the files
|
||||||
|
scp -rp active/container_elk/. elk:elk
|
||||||
|
# SSH into the host
|
||||||
|
ssh -t elk "cd elk ; bash --login"
|
||||||
|
# Run the services
|
||||||
|
docker compose -f elk-compose.yaml up
|
||||||
|
```
|
||||||
29
active/container_elk/filebeat.yaml
Normal file
29
active/container_elk/filebeat.yaml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
filebeat.inputs:
|
||||||
|
- type: filestream
|
||||||
|
id: default-filestream
|
||||||
|
paths:
|
||||||
|
- ingest_data/*.log
|
||||||
|
|
||||||
|
|
||||||
|
filebeat.autodiscover:
|
||||||
|
providers:
|
||||||
|
- type: docker
|
||||||
|
hints.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
processors:
|
||||||
|
- add_docker_metadata: ~
|
||||||
|
|
||||||
|
|
||||||
|
setup.kibana:
|
||||||
|
host: ${KIBANA_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
|
||||||
|
|
||||||
|
output.elasticsearch:
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl.enabled: true
|
||||||
|
ssl.certificate_authorities: "certs/ca/ca.crt"
|
||||||
24
active/container_elk/logstash.conf
Normal file
24
active/container_elk/logstash.conf
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
input {
|
||||||
|
file {
|
||||||
|
#https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html
|
||||||
|
#default is TAIL which assumes more data will come into the file.
|
||||||
|
#change to mode => "read" if the file is a compelte file. by default, the file will be removed once reading is complete -- backup your files if you need them.
|
||||||
|
mode => "tail"
|
||||||
|
path => "/usr/share/logstash/ingest_data/*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
filter {
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
output {
|
||||||
|
elasticsearch {
|
||||||
|
index => "logstash-%{+YYYY.MM.dd}"
|
||||||
|
hosts=> "${ELASTIC_HOSTS}"
|
||||||
|
user=> "${ELASTIC_USER}"
|
||||||
|
password=> "${ELASTIC_PASSWORD}"
|
||||||
|
cacert=> "certs/ca/ca.crt"
|
||||||
|
}
|
||||||
|
}
|
||||||
62
active/container_elk/metricbeat.yaml
Normal file
62
active/container_elk/metricbeat.yaml
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
metricbeat.config.modules:
|
||||||
|
path: ${path.config}/modules.d/*.yml
|
||||||
|
reload.enabled: false
|
||||||
|
|
||||||
|
|
||||||
|
metricbeat.modules:
|
||||||
|
- module: elasticsearch
|
||||||
|
xpack.enabled: true
|
||||||
|
period: 10s
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
ssl.certificate_authorities: "certs/ca/ca.crt"
|
||||||
|
ssl.certificate: "certs/es01/es01.crt"
|
||||||
|
ssl.key: "certs/es01/es01.key"
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
- module: logstash
|
||||||
|
xpack.enabled: true
|
||||||
|
period: 10s
|
||||||
|
hosts: ${LOGSTASH_HOSTS}
|
||||||
|
|
||||||
|
|
||||||
|
- module: kibana
|
||||||
|
metricsets:
|
||||||
|
- stats
|
||||||
|
period: 10s
|
||||||
|
hosts: ${KIBANA_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
xpack.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
- module: docker
|
||||||
|
metricsets:
|
||||||
|
- "container"
|
||||||
|
- "cpu"
|
||||||
|
- "diskio"
|
||||||
|
- "healthcheck"
|
||||||
|
- "info"
|
||||||
|
#- "image"
|
||||||
|
- "memory"
|
||||||
|
- "network"
|
||||||
|
hosts: ["unix:///var/run/docker.sock"]
|
||||||
|
period: 10s
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
processors:
|
||||||
|
- add_host_metadata: ~
|
||||||
|
- add_docker_metadata: ~
|
||||||
|
|
||||||
|
|
||||||
|
output.elasticsearch:
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl:
|
||||||
|
certificate: "certs/es01/es01.crt"
|
||||||
|
certificate_authorities: "certs/ca/ca.crt"
|
||||||
|
key: "certs/es01/es01.key"
|
||||||
41
active/container_gitea/gitea-compose.yaml
Normal file
41
active/container_gitea/gitea-compose.yaml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
networks:
|
||||||
|
gitea:
|
||||||
|
external: false
|
||||||
|
|
||||||
|
services:
|
||||||
|
server:
|
||||||
|
image: docker.gitea.com/gitea:1.25.3
|
||||||
|
container_name: gitea
|
||||||
|
environment:
|
||||||
|
- USER_UID=1001
|
||||||
|
- USER_GID=1001
|
||||||
|
- GITEA__database__DB_TYPE=postgres
|
||||||
|
- GITEA__database__HOST=db:5432
|
||||||
|
- GITEA__database__NAME=gitea
|
||||||
|
- GITEA__database__USER=gitea
|
||||||
|
- GITEA__database__PASSWD=gitea
|
||||||
|
restart: always
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /srv/gitea-data/data:/data
|
||||||
|
- /srv/gitea-data/custom:/var/lib/gitea/custom
|
||||||
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
ports:
|
||||||
|
- "3000:3000"
|
||||||
|
- "22:22"
|
||||||
|
depends_on:
|
||||||
|
- db
|
||||||
|
|
||||||
|
db:
|
||||||
|
image: docker.io/library/postgres:15
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
- POSTGRES_USER=gitea
|
||||||
|
- POSTGRES_PASSWORD=gitea
|
||||||
|
- POSTGRES_DB=gitea
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /srv/gitea-db/postgres:/var/lib/postgresql/data
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
# Gitea
|
# Gitea
|
||||||
|
|
||||||
- [Gitea](#gitea)
|
- [Gitea](#gitea)
|
||||||
|
- [Gitea on Docker](#gitea-on-docker)
|
||||||
- [Gitea on Rootless Podman](#gitea-on-rootless-podman)
|
- [Gitea on Rootless Podman](#gitea-on-rootless-podman)
|
||||||
- [A note on directories](#a-note-on-directories)
|
- [A note on directories](#a-note-on-directories)
|
||||||
- [Create the gitea user](#create-the-gitea-user)
|
- [Create the gitea user](#create-the-gitea-user)
|
||||||
@@ -14,6 +15,23 @@
|
|||||||
- [Cache Cleanup](#cache-cleanup)
|
- [Cache Cleanup](#cache-cleanup)
|
||||||
- [Email Notifications](#email-notifications)
|
- [Email Notifications](#email-notifications)
|
||||||
|
|
||||||
|
## Gitea on Docker
|
||||||
|
|
||||||
|
<https://docs.gitea.com/installation/install-with-docker>
|
||||||
|
|
||||||
|
Prereqs
|
||||||
|
|
||||||
|
1. Change the default SSH port for your server to 2022 (or something similar).
|
||||||
|
2. Allow SSH to bind to that port: `semanage port -a -t ssh_port_t -p tcp 2022`
|
||||||
|
3. Allow 2022 on the firewall: `firewall-cmd --add-port=2022/tcp --permanent && firewall-cmd --reload`
|
||||||
|
4. Mount data dirs at `/srv/gitea-data` and `/srv/gitea-db`
|
||||||
|
5. Create a gitea user and update gitea-compose.yaml with the correct UID
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_gitea/gitea-compose.yaml gitea:
|
||||||
|
docker compose -f gitea-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
## Gitea on Rootless Podman
|
## Gitea on Rootless Podman
|
||||||
|
|
||||||
### A note on directories
|
### A note on directories
|
||||||
@@ -57,22 +75,22 @@ exit
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Run this in Homelab, not on the server.
|
# Run this in Homelab, not on the server.
|
||||||
mkdir $(pwd)/active/podman_gitea/quadlets
|
mkdir $(pwd)/active/container_gitea/quadlets
|
||||||
|
|
||||||
# Generate the systemd service
|
# Generate the systemd service
|
||||||
podman run \
|
podman run \
|
||||||
--network none \
|
--network none \
|
||||||
--rm \
|
--rm \
|
||||||
-v $(pwd)/active/podman_gitea/compose:$(pwd)/active/podman_gitea/compose:z \
|
-v $(pwd)/active/container_gitea/compose:$(pwd)/active/container_gitea/compose:z \
|
||||||
-v $(pwd)/active/podman_gitea/quadlets:$(pwd)/active/podman_gitea/quadlets:z \
|
-v $(pwd)/active/container_gitea/quadlets:$(pwd)/active/container_gitea/quadlets:z \
|
||||||
quay.io/k9withabone/podlet \
|
quay.io/k9withabone/podlet \
|
||||||
-f $(pwd)/active/podman_gitea/quadlets \
|
-f $(pwd)/active/container_gitea/quadlets \
|
||||||
-i \
|
-i \
|
||||||
--overwrite \
|
--overwrite \
|
||||||
compose $(pwd)/active/podman_gitea/compose/compose.yaml
|
compose $(pwd)/active/container_gitea/compose/compose.yaml
|
||||||
|
|
||||||
# Copy the files to the server
|
# Copy the files to the server
|
||||||
scp -r $(pwd)/active/podman_gitea/quadlets/. 3dserver:/home/gitea/.config/containers/systemd/
|
scp -r $(pwd)/active/container_gitea/quadlets/. 3dserver:/home/gitea/.config/containers/systemd/
|
||||||
```
|
```
|
||||||
|
|
||||||
### Install Quadlets
|
### Install Quadlets
|
||||||
@@ -112,7 +130,7 @@ systemctl --user enable --now podman-auto-update.timer
|
|||||||
```bash
|
```bash
|
||||||
# Upload quadlets and restart
|
# Upload quadlets and restart
|
||||||
export PODMAN_SERVER=3dserver
|
export PODMAN_SERVER=3dserver
|
||||||
scp -r active/podman_gitea/quadlets/. $PODMAN_SERVER:/home/gitea/.config/containers/systemd/
|
scp -r active/container_gitea/quadlets/. $PODMAN_SERVER:/home/gitea/.config/containers/systemd/
|
||||||
ssh $PODMAN_SERVER chown -R gitea:gitea /home/gitea/.config/containers/systemd/
|
ssh $PODMAN_SERVER chown -R gitea:gitea /home/gitea/.config/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER
|
ssh $PODMAN_SERVER
|
||||||
@@ -141,8 +159,6 @@ you have.
|
|||||||
### Install
|
### Install
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
touch config.yaml
|
|
||||||
|
|
||||||
export GITEA_TOKEN=
|
export GITEA_TOKEN=
|
||||||
docker run \
|
docker run \
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||||
@@ -197,6 +213,10 @@ docker builder prune -a
|
|||||||
|
|
||||||
To run it every day at noon: `crontab -e`
|
To run it every day at noon: `crontab -e`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dnf install cronie cronie-anacron
|
||||||
|
```
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
0 12 * * * yes | docker builder prune -a
|
0 12 * * * yes | docker builder prune -a
|
||||||
0 12 * * * docker image prune -a -f
|
0 12 * * * docker image prune -a -f
|
||||||
16
active/container_gitlab/gitlab-compose.yaml
Normal file
16
active/container_gitlab/gitlab-compose.yaml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
services:
|
||||||
|
gitlab:
|
||||||
|
image: gitlab/gitlab-ce:18.7.3-ce.0
|
||||||
|
container_name: gitlab
|
||||||
|
restart: always
|
||||||
|
hostname: 'gitlab.example.com'
|
||||||
|
ports:
|
||||||
|
- '80:80'
|
||||||
|
- '443:443'
|
||||||
|
- '22:22'
|
||||||
|
volumes:
|
||||||
|
- '$GITLAB_HOME/gitlab.rb:/etc/gitlab/gitlab.rb:ro'
|
||||||
|
- '$GITLAB_HOME/config:/etc/gitlab'
|
||||||
|
- '$GITLAB_HOME/logs:/var/log/gitlab'
|
||||||
|
- '$GITLAB_HOME/data:/var/opt/gitlab'
|
||||||
|
shm_size: '256m'
|
||||||
66
active/container_gitlab/gitlab.md
Normal file
66
active/container_gitlab/gitlab.md
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Gitlab
|
||||||
|
|
||||||
|
## Docker Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Change the server's SSH port in /etc/ssh/sshd_config
|
||||||
|
Port = 2424
|
||||||
|
|
||||||
|
# Then tell selinux about it
|
||||||
|
semanage port -a -t ssh_port_t -p tcp 2424
|
||||||
|
# And add the firewall rule
|
||||||
|
firewall-cmd --add-port=2424/tcp --permanent
|
||||||
|
firewall-cmd --reload
|
||||||
|
# Reload SSH
|
||||||
|
systemctl restart sshd
|
||||||
|
|
||||||
|
# Make a Gitlab directory
|
||||||
|
mkdir -p /srv/gitlab
|
||||||
|
|
||||||
|
# Add the following to .bashrc (used in the compose file)
|
||||||
|
export GITLAB_HOME=/srv/gitlab
|
||||||
|
```
|
||||||
|
|
||||||
|
Create your `gitlab-compose.yaml`. See the file in this repo for an example.
|
||||||
|
|
||||||
|
Also create the file `secrets/gitlab.rb` with your configuration. Should look something like this:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
# Add any other gitlab.rb configuration here, each on its own line
|
||||||
|
external_url 'https://gitlab.reeseapps.com'
|
||||||
|
nginx['listen_port'] = 80
|
||||||
|
nginx['listen_https'] = false
|
||||||
|
nginx['proxy_set_headers'] = {
|
||||||
|
"X-Forwarded-Proto" => "https",
|
||||||
|
"X-Forwarded-Ssl" => "on",
|
||||||
|
"Host" => "gitlab.mydomain.de",
|
||||||
|
"X-Real-IP" => "$$remote_addr",
|
||||||
|
"X-Forwarded-For" => "$$proxy_add_x_forwarded_for",
|
||||||
|
"Upgrade" => "$$http_upgrade",
|
||||||
|
"Connection" => "$$connection_upgrade"
|
||||||
|
}
|
||||||
|
gitlab_rails['smtp_enable'] = true
|
||||||
|
gitlab_rails['smtp_address'] = "email-smtp.us-east-1.amazonaws.com"
|
||||||
|
gitlab_rails['smtp_port'] = 465
|
||||||
|
gitlab_rails['smtp_user_name'] = ""
|
||||||
|
gitlab_rails['smtp_password'] = ""
|
||||||
|
gitlab_rails['smtp_domain'] = ""
|
||||||
|
gitlab_rails['smtp_authentication'] = "login"
|
||||||
|
gitlab_rails['smtp_ssl'] = true
|
||||||
|
gitlab_rails['smtp_force_ssl'] = true
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy `gitlab.rb` and `gitlab-compose.yaml` to your server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_gitlab/gitlab-compose.yaml gitlab:
|
||||||
|
scp active/container_gitlab/secrets/gitlab.rb gitlab:/srv/gitlab
|
||||||
|
```
|
||||||
|
|
||||||
|
Then docker compose up:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose -f gitlab-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
The initial username is root and the password will be at `/srv/gitlab/config/initial_root_password`.
|
||||||
@@ -13,14 +13,14 @@ podman run \
|
|||||||
--name=grafana \
|
--name=grafana \
|
||||||
--volume grafana-storage:/var/lib/grafana \
|
--volume grafana-storage:/var/lib/grafana \
|
||||||
--network=systemd-graphite \
|
--network=systemd-graphite \
|
||||||
grafana/grafana-enterprise > active/podman_grafana/grafana.container
|
grafana/grafana-enterprise > active/container_grafana/grafana.container
|
||||||
```
|
```
|
||||||
|
|
||||||
Copy the graphite.container and graphite.network file to the server you want to run it on
|
Copy the graphite.container and graphite.network file to the server you want to run it on
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PODMAN_SERVER=
|
export PODMAN_SERVER=
|
||||||
scp active/podman_grafana/grafana.container $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_grafana/grafana.container $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER systemctl daemon-reload
|
ssh $PODMAN_SERVER systemctl daemon-reload
|
||||||
ssh $PODMAN_SERVER systemctl enable --now grafana.service
|
ssh $PODMAN_SERVER systemctl enable --now grafana.service
|
||||||
@@ -7,7 +7,7 @@
|
|||||||
```bash
|
```bash
|
||||||
# Generate the network
|
# Generate the network
|
||||||
podman run ghcr.io/containers/podlet --description Graphite \
|
podman run ghcr.io/containers/podlet --description Graphite \
|
||||||
podman network create --ipv6 graphite > active/podman_graphite/graphite.network
|
podman network create --ipv6 graphite > active/container_graphite/graphite.network
|
||||||
|
|
||||||
# Generate the systemd container service
|
# Generate the systemd container service
|
||||||
podman run ghcr.io/containers/podlet --description Graphite \
|
podman run ghcr.io/containers/podlet --description Graphite \
|
||||||
@@ -23,15 +23,15 @@ podman run \
|
|||||||
-v graphite_configs:/opt/graphite/conf \
|
-v graphite_configs:/opt/graphite/conf \
|
||||||
-v graphite_data:/opt/graphite/storage \
|
-v graphite_data:/opt/graphite/storage \
|
||||||
-v graphite_statsd_config:/opt/statsd/config \
|
-v graphite_statsd_config:/opt/statsd/config \
|
||||||
ghcr.io/deniszh/graphite-statsd > active/podman_graphite/graphite.container
|
ghcr.io/deniszh/graphite-statsd > active/container_graphite/graphite.container
|
||||||
```
|
```
|
||||||
|
|
||||||
Copy the graphite.container and graphite.network file to the server you want to run it on
|
Copy the graphite.container and graphite.network file to the server you want to run it on
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PODMAN_SERVER=
|
export PODMAN_SERVER=
|
||||||
scp active/podman_graphite/graphite.network $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_graphite/graphite.network $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
scp active/podman_graphite/graphite.container $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_graphite/graphite.container $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER systemctl daemon-reload
|
ssh $PODMAN_SERVER systemctl daemon-reload
|
||||||
ssh $PODMAN_SERVER systemctl start graphite.network
|
ssh $PODMAN_SERVER systemctl start graphite.network
|
||||||
@@ -2,7 +2,8 @@
|
|||||||
|
|
||||||
- [Podman immich](#podman-immich)
|
- [Podman immich](#podman-immich)
|
||||||
- [Setup immich Project](#setup-immich-project)
|
- [Setup immich Project](#setup-immich-project)
|
||||||
- [Install immich](#install-immich)
|
- [Install immich with Docker](#install-immich-with-docker)
|
||||||
|
- [Install immich with Rootless Podman](#install-immich-with-rootless-podman)
|
||||||
- [Create the immich user](#create-the-immich-user)
|
- [Create the immich user](#create-the-immich-user)
|
||||||
- [Write the immich compose spec](#write-the-immich-compose-spec)
|
- [Write the immich compose spec](#write-the-immich-compose-spec)
|
||||||
- [A Note on Volumes](#a-note-on-volumes)
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
@@ -21,7 +22,7 @@
|
|||||||
|
|
||||||
## Setup immich Project
|
## Setup immich Project
|
||||||
|
|
||||||
- [x] Copy and rename this folder to active/podman_immich
|
- [x] Copy and rename this folder to active/container_immich
|
||||||
- [x] Find and replace immich with the name of the service.
|
- [x] Find and replace immich with the name of the service.
|
||||||
- [x] Create the rootless user to run the podman containers
|
- [x] Create the rootless user to run the podman containers
|
||||||
- [ ] Write the compose.yaml spec for your service
|
- [ ] Write the compose.yaml spec for your service
|
||||||
@@ -30,7 +31,19 @@
|
|||||||
- [ ] Expose the quadlet service
|
- [ ] Expose the quadlet service
|
||||||
- [ ] Install a backup service and timer
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
## Install immich
|
## Install immich with Docker
|
||||||
|
|
||||||
|
<https://docs.immich.app/install/docker-compose/>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_immich/release-compose.yaml immich:
|
||||||
|
scp active/container_immich/release-env immich:.env
|
||||||
|
|
||||||
|
mkdir /srv/immich
|
||||||
|
docker compose -f release-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install immich with Rootless Podman
|
||||||
|
|
||||||
### Create the immich user
|
### Create the immich user
|
||||||
|
|
||||||
@@ -49,10 +62,10 @@ mkdir -p /home/immich/.config/containers/systemd
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Pull the compose file
|
# Pull the compose file
|
||||||
wget -O active/podman_immich/release-compose.yaml https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
wget -O active/container_immich/release-compose.yaml https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
||||||
|
|
||||||
# Pull the .env file
|
# Pull the .env file
|
||||||
wget -O active/podman_immich/release-env https://github.com/immich-app/immich/releases/latest/download/example.env
|
wget -O active/container_immich/release-env https://github.com/immich-app/immich/releases/latest/download/example.env
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Edit the compose.yaml. Replace all environment variables with their correct values.
|
2. Edit the compose.yaml. Replace all environment variables with their correct values.
|
||||||
@@ -71,8 +84,8 @@ Run the following to convert a compose.yaml into the various `.container` files
|
|||||||
podman run \
|
podman run \
|
||||||
--security-opt label=disable \
|
--security-opt label=disable \
|
||||||
--rm \
|
--rm \
|
||||||
-v $(pwd)/active/podman_immich/compose:/compose \
|
-v $(pwd)/active/container_immich/compose:/compose \
|
||||||
-v $(pwd)/active/podman_immich/quadlets:/quadlets \
|
-v $(pwd)/active/container_immich/quadlets:/quadlets \
|
||||||
quay.io/k9withabone/podlet \
|
quay.io/k9withabone/podlet \
|
||||||
-f /quadlets \
|
-f /quadlets \
|
||||||
-i \
|
-i \
|
||||||
@@ -81,7 +94,7 @@ compose /compose/compose.yaml
|
|||||||
|
|
||||||
# Copy the files to the server
|
# Copy the files to the server
|
||||||
export PODMAN_SERVER=3dserver
|
export PODMAN_SERVER=3dserver
|
||||||
scp -r active/podman_immich/quadlets/. $PODMAN_SERVER:/home/immich/.config/containers/systemd/
|
scp -r active/container_immich/quadlets/. $PODMAN_SERVER:/home/immich/.config/containers/systemd/
|
||||||
ssh $PODMAN_SERVER chown -R immich:immich /home/immich/.config/containers/systemd/
|
ssh $PODMAN_SERVER chown -R immich:immich /home/immich/.config/containers/systemd/
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -109,8 +122,8 @@ systemctl --user enable --now podman-auto-update.timer
|
|||||||
|
|
||||||
### Expose immich
|
### Expose immich
|
||||||
|
|
||||||
1. If you need a domain, follow the [DDNS instructions](/active/podman_ddns/ddns.md#install-a-new-ddns-service)
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
2. For a web service, follow the [Caddy instructions](/active/podman_caddy/caddy.md#adding-a-new-caddy-record)
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
#### firewalld
|
#### firewalld
|
||||||
@@ -157,7 +170,7 @@ ssh immich systemctl --user restart immich
|
|||||||
npm i -g @immich/cli
|
npm i -g @immich/cli
|
||||||
|
|
||||||
# immich login [url] [key]
|
# immich login [url] [key]
|
||||||
immich login http://192.168.1.216:2283/api HFEJ38DNSDUEG
|
immich login http://192.168.1.216:2283/api <key here>
|
||||||
|
|
||||||
# Check the upload
|
# Check the upload
|
||||||
immich upload --dry-run --recursive directory/
|
immich upload --dry-run --recursive directory/
|
||||||
@@ -21,7 +21,7 @@ services:
|
|||||||
- ${UPLOAD_LOCATION}:/data
|
- ${UPLOAD_LOCATION}:/data
|
||||||
- /etc/localtime:/etc/localtime:ro
|
- /etc/localtime:/etc/localtime:ro
|
||||||
env_file:
|
env_file:
|
||||||
- .env
|
- /root/.env
|
||||||
ports:
|
ports:
|
||||||
- '2283:2283'
|
- '2283:2283'
|
||||||
depends_on:
|
depends_on:
|
||||||
@@ -42,14 +42,14 @@ services:
|
|||||||
volumes:
|
volumes:
|
||||||
- model-cache:/cache
|
- model-cache:/cache
|
||||||
env_file:
|
env_file:
|
||||||
- .env
|
- /root/.env
|
||||||
restart: always
|
restart: always
|
||||||
healthcheck:
|
healthcheck:
|
||||||
disable: false
|
disable: false
|
||||||
|
|
||||||
redis:
|
redis:
|
||||||
container_name: immich_redis
|
container_name: immich_redis
|
||||||
image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
|
image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
|
||||||
healthcheck:
|
healthcheck:
|
||||||
test: redis-cli ping || exit 1
|
test: redis-cli ping || exit 1
|
||||||
restart: always
|
restart: always
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
|
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
|
||||||
|
|
||||||
# The location where your uploaded files are stored
|
# The location where your uploaded files are stored
|
||||||
UPLOAD_LOCATION=./library
|
UPLOAD_LOCATION=/srv/immich-data/library
|
||||||
|
|
||||||
# The location where your database files are stored. Network shares are not supported for the database
|
# The location where your database files are stored. Network shares are not supported for the database
|
||||||
DB_DATA_LOCATION=./postgres
|
DB_DATA_LOCATION=/srv/immich-db/postgres
|
||||||
|
|
||||||
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
|
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
|
||||||
# TZ=Etc/UTC
|
# TZ=Etc/UTC
|
||||||
17
active/container_jellyfin/jellyfin-compose.yaml
Normal file
17
active/container_jellyfin/jellyfin-compose.yaml
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
services:
|
||||||
|
jellyfin:
|
||||||
|
image: jellyfin/jellyfin
|
||||||
|
container_name: jellyfin
|
||||||
|
ports:
|
||||||
|
- 8096:8096/tcp
|
||||||
|
- 7359:7359/udp
|
||||||
|
volumes:
|
||||||
|
- /srv/jellyfin/config:/config
|
||||||
|
- /srv/jellyfin/cache:/cache
|
||||||
|
- type: bind
|
||||||
|
source: /mnt/media
|
||||||
|
target: /media
|
||||||
|
read_only: true
|
||||||
|
restart: 'always'
|
||||||
|
environment:
|
||||||
|
- JELLYFIN_PublishedServerUrl=https://jellyfin.reeseapps.com
|
||||||
@@ -1,7 +1,8 @@
|
|||||||
# Jellyfin
|
# Jellyfin
|
||||||
|
|
||||||
- [Jellyfin](#jellyfin)
|
- [Jellyfin](#jellyfin)
|
||||||
- [Install](#install)
|
- [Docker Install](#docker-install)
|
||||||
|
- [Rootless Podman Install](#rootless-podman-install)
|
||||||
- [Upgrade](#upgrade)
|
- [Upgrade](#upgrade)
|
||||||
- [Mounting Media Directory](#mounting-media-directory)
|
- [Mounting Media Directory](#mounting-media-directory)
|
||||||
|
|
||||||
@@ -9,7 +10,11 @@ They have podman rootless instructions!
|
|||||||
|
|
||||||
<https://jellyfin.org/docs/general/installation/container/#managing-via-systemd>
|
<https://jellyfin.org/docs/general/installation/container/#managing-via-systemd>
|
||||||
|
|
||||||
## Install
|
## Docker Install
|
||||||
|
|
||||||
|
<https://jellyfin.org/docs/general/installation/container>
|
||||||
|
|
||||||
|
## Rootless Podman Install
|
||||||
|
|
||||||
1. Create the jellyfin user
|
1. Create the jellyfin user
|
||||||
|
|
||||||
@@ -27,7 +32,7 @@ They have podman rootless instructions!
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PODMAN_SERVER=3dserver
|
export PODMAN_SERVER=3dserver
|
||||||
scp -r active/podman_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
scp -r active/container_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
||||||
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER
|
ssh $PODMAN_SERVER
|
||||||
@@ -45,7 +50,7 @@ They have podman rootless instructions!
|
|||||||
```bash
|
```bash
|
||||||
# Upload quadlets and restart
|
# Upload quadlets and restart
|
||||||
export PODMAN_SERVER=3dserver
|
export PODMAN_SERVER=3dserver
|
||||||
scp -r active/podman_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
scp -r active/container_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
||||||
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER
|
ssh $PODMAN_SERVER
|
||||||
19
active/container_keycloak/compose/compose.yaml
Normal file
19
active/container_keycloak/compose/compose.yaml
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
services:
|
||||||
|
keycloak:
|
||||||
|
container_name: keycloak
|
||||||
|
restart: always
|
||||||
|
image: quay.io/keycloak/keycloak:26.4.5
|
||||||
|
ports:
|
||||||
|
- "9443:443"
|
||||||
|
- "8443:8443"
|
||||||
|
volumes:
|
||||||
|
- /home/foobar/data:/var/app/data
|
||||||
|
security_opt:
|
||||||
|
- label=disable
|
||||||
|
userns_mode: keep-id
|
||||||
|
command:
|
||||||
|
- "start"
|
||||||
|
- "--hostname"
|
||||||
|
- "https://keycloak.reeseapps.com"
|
||||||
|
- "--hostname-admin"
|
||||||
|
- "https://keycloak.reeselink.com:8443"
|
||||||
186
active/container_keycloak/keycloak.md
Normal file
186
active/container_keycloak/keycloak.md
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
# Podman keycloak
|
||||||
|
|
||||||
|
- [Podman keycloak](#podman-keycloak)
|
||||||
|
- [Setup keycloak Project](#setup-keycloak-project)
|
||||||
|
- [Install Keycloak with Docker](#install-keycloak-with-docker)
|
||||||
|
- [Install Keycloak with Podman](#install-keycloak-with-podman)
|
||||||
|
- [Create the keycloak user](#create-the-keycloak-user)
|
||||||
|
- [Write the keycloak compose spec](#write-the-keycloak-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert keycloak compose spec to quadlets](#convert-keycloak-compose-spec-to-quadlets)
|
||||||
|
- [Create any container-mounted directories](#create-any-container-mounted-directories)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose keycloak](#expose-keycloak)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup keycloak](#backup-keycloak)
|
||||||
|
- [Upgrade keycloak](#upgrade-keycloak)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup keycloak Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_keycloak
|
||||||
|
- [ ] Find and replace keycloak with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install Keycloak with Docker
|
||||||
|
|
||||||
|
<https://www.keycloak.org/getting-started/getting-started-docker>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test in dev mode
|
||||||
|
docker run -p 8080:8080 -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin quay.io/keycloak/keycloak:26.4.7 start-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install Keycloak with Podman
|
||||||
|
|
||||||
|
### Create the keycloak user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd keycloak
|
||||||
|
loginctl enable-linger $(id -u keycloak)
|
||||||
|
systemctl --user --machine=keycloak@.host enable podman-restart
|
||||||
|
systemctl --user --machine=keycloak@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/keycloak/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the keycloak compose spec
|
||||||
|
|
||||||
|
<https://www.keycloak.org/getting-started/getting-started-podman>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Based on the example
|
||||||
|
podman run \
|
||||||
|
-p 127.0.0.1:8080:8080 \
|
||||||
|
-e KC_BOOTSTRAP_ADMIN_USERNAME=admin \
|
||||||
|
-e KC_BOOTSTRAP_ADMIN_PASSWORD=admin \
|
||||||
|
quay.io/keycloak/keycloak:26.4.5 start-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/keycloak/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert keycloak compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_keycloak/:/compose \
|
||||||
|
-v $(pwd)/active/container_keycloak/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r active/container_keycloak/quadlets/. $PODMAN_SERVER:/home/keycloak/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R keycloak:keycloak /home/keycloak/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create any container-mounted directories
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell keycloak@
|
||||||
|
podman unshare
|
||||||
|
mkdir some_volume
|
||||||
|
# Chown to the namespaced user with UID 1000
|
||||||
|
# This will be some really obscure UID outside the namespace
|
||||||
|
# This will also solve most permission denied errors
|
||||||
|
chown -R 1000:1000 some_volume
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell keycloak@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart keycloak
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose keycloak
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup keycloak
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade keycloak
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-keycloak-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER:/home/keycloak/.config/containers/systemd/
|
||||||
|
ssh keycloak systemctl --user daemon-reload
|
||||||
|
ssh keycloak systemctl --user restart keycloak
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user keycloak is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel keycloak
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.
|
||||||
3
active/container_litellm/compose/README.md
Normal file
3
active/container_litellm/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
37
active/container_litellm/compose/compose.yaml
Normal file
37
active/container_litellm/compose/compose.yaml
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
services:
|
||||||
|
litellm:
|
||||||
|
image: docker.litellm.ai/berriai/litellm:main-latest
|
||||||
|
ports:
|
||||||
|
- 4000:4000
|
||||||
|
env_file: /home/ai/litellm.env
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: "postgresql://llmproxy:dbpassword9090@host.containers.internal:5432/litellm"
|
||||||
|
STORE_MODEL_IN_DB: "True"
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
- litellm-db # Indicates that this service depends on the 'litellm-db' service, ensuring 'litellm-db' starts first
|
||||||
|
healthcheck: # Defines the health check configuration for the container
|
||||||
|
test:
|
||||||
|
- CMD-SHELL
|
||||||
|
- python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')" # Command to execute for health check
|
||||||
|
interval: 30s # Perform health check every 30 seconds
|
||||||
|
timeout: 10s # Health check command times out after 10 seconds
|
||||||
|
retries: 3 # Retry up to 3 times if health check fails
|
||||||
|
start_period: 40s # Wait 40 seconds after container start before beginning health checks
|
||||||
|
|
||||||
|
litellm-db:
|
||||||
|
image: docker.io/postgres:16
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
POSTGRES_DB: litellm
|
||||||
|
POSTGRES_USER: llmproxy
|
||||||
|
POSTGRES_PASSWORD: dbpassword9090
|
||||||
|
ports:
|
||||||
|
- "5432:5432"
|
||||||
|
volumes:
|
||||||
|
- litellm_postgres_data:/var/lib/postgresql/data:z
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
|
||||||
|
interval: 1s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
67
active/container_litellm/config.yaml
Normal file
67
active/container_litellm/config.yaml
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# General settings
|
||||||
|
|
||||||
|
general_settings:
|
||||||
|
request_timeout: 600
|
||||||
|
|
||||||
|
# Models
|
||||||
|
model_list:
|
||||||
|
# Qwen3.5-35B variants
|
||||||
|
- model_name: qwen3.5-35b-think-general
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 1.0
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: true
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-think-code
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 0.6
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 0.0
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: true
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-instruct-general
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 0.7
|
||||||
|
top_p: 0.8
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: false
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-instruct-reasoning
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 1.0
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: false
|
||||||
233
active/container_litellm/litellm.md
Normal file
233
active/container_litellm/litellm.md
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
# Podman litellm
|
||||||
|
|
||||||
|
- [Podman litellm](#podman-litellm)
|
||||||
|
- [Setup litellm Project](#setup-litellm-project)
|
||||||
|
- [Install litellm](#install-litellm)
|
||||||
|
- [Create the ai user](#create-the-ai-user)
|
||||||
|
- [Write the litellm compose spec](#write-the-litellm-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert litellm compose spec to quadlets](#convert-litellm-compose-spec-to-quadlets)
|
||||||
|
- [Create the litellm.env file](#create-the-litellmenv-file)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose litellm](#expose-litellm)
|
||||||
|
- [Using LiteLLM](#using-litellm)
|
||||||
|
- [Adding Models](#adding-models)
|
||||||
|
- [Testing Models](#testing-models)
|
||||||
|
- [Backup litellm](#backup-litellm)
|
||||||
|
- [Upgrade litellm](#upgrade-litellm)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup litellm Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_litellm
|
||||||
|
- [ ] Find and replace litellm with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install litellm
|
||||||
|
|
||||||
|
### Create the ai user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd ai
|
||||||
|
loginctl enable-linger $(id -u ai)
|
||||||
|
systemctl --user --machine=ai@.host enable podman-restart
|
||||||
|
systemctl --user --machine=ai@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/ai/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the litellm compose spec
|
||||||
|
|
||||||
|
See the [docker run command here](https://docs.litellm.ai/docs/proxy/docker_quick_start#32-start-proxy)
|
||||||
|
|
||||||
|
Edit the compose.yaml at active/container_litellm/compose/compose.yaml
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/ai/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert litellm compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_litellm/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_litellm/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=ai
|
||||||
|
scp -r active/container_litellm/quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create the litellm.env file
|
||||||
|
|
||||||
|
Should look something like:
|
||||||
|
|
||||||
|
```env
|
||||||
|
LITELLM_MASTER_KEY="random-string"
|
||||||
|
LITELLM_SALT_KEY="random-string"
|
||||||
|
|
||||||
|
UI_USERNAME="admin"
|
||||||
|
UI_PASSWORD="random-string"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then copy it to the server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=ai
|
||||||
|
scp -r active/container_litellm/config.yaml $PODMAN_SERVER:/home/ai/litellm_config.yaml
|
||||||
|
ssh $PODMAN_SERVER chown -R ai:ai /home/ai/litellm_config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh ai
|
||||||
|
machinectl shell ai@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart litellm
|
||||||
|
journalctl --user -u litellm -f
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose litellm
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
## Using LiteLLM
|
||||||
|
|
||||||
|
### Adding Models
|
||||||
|
|
||||||
|
```json
|
||||||
|
// qwen3.5-35b-a3b-thinking
|
||||||
|
{
|
||||||
|
"temperature": 1,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-coding
|
||||||
|
{
|
||||||
|
"temperature": 0.6,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 0,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-instruct
|
||||||
|
{
|
||||||
|
"temperature": 0.7,
|
||||||
|
"top_p": 0.8,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing Models
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List models
|
||||||
|
curl -L -X GET 'https://aipi.reeseapps.com/v1/models' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234'
|
||||||
|
|
||||||
|
curl -L -X POST 'https://aipi.reeseapps.com/v1/chat/completions' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234' \
|
||||||
|
-d '{
|
||||||
|
"model": "gpt-4o-mini", # 👈 REPLACE with 'public model name' for any db-model
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"content": "Hey, how's it going",
|
||||||
|
"role": "user"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup litellm
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade litellm
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-litellm-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
ssh ai systemctl --user daemon-reload
|
||||||
|
ssh ai systemctl --user restart litellm
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user ai is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel ai
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.
|
||||||
15
active/container_litellm/quadlets/litellm-db.container
Normal file
15
active/container_litellm/quadlets/litellm-db.container
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[Container]
|
||||||
|
Environment=POSTGRES_DB=litellm POSTGRES_USER=llmproxy POSTGRES_PASSWORD=dbpassword9090
|
||||||
|
HealthCmd='pg_isready -d litellm -U llmproxy'
|
||||||
|
HealthInterval=1s
|
||||||
|
HealthRetries=10
|
||||||
|
HealthTimeout=5s
|
||||||
|
Image=docker.io/postgres:16
|
||||||
|
PublishPort=5432:5432
|
||||||
|
Volume=litellm_postgres_data:/var/lib/postgresql/data:z
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
19
active/container_litellm/quadlets/litellm.container
Normal file
19
active/container_litellm/quadlets/litellm.container
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
[Unit]
|
||||||
|
Requires=litellm-db.service
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
Environment=DATABASE_URL=postgresql://llmproxy:dbpassword9090@host.containers.internal:5432/litellm STORE_MODEL_IN_DB=True
|
||||||
|
EnvironmentFile=/home/ai/litellm.env
|
||||||
|
HealthCmd="python3 -c \"import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')\""
|
||||||
|
HealthInterval=30s
|
||||||
|
HealthRetries=3
|
||||||
|
HealthStartPeriod=40s
|
||||||
|
HealthTimeout=10s
|
||||||
|
Image=docker.litellm.ai/berriai/litellm:main-latest
|
||||||
|
PublishPort=4000:4000
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
||||||
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
||||||
|
- [Podman](#podman)
|
||||||
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
||||||
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
||||||
- [Running Anything LLM](#running-anything-llm)
|
- [Running Anything LLM](#running-anything-llm)
|
||||||
@@ -32,6 +33,12 @@
|
|||||||
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
||||||
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
||||||
|
|
||||||
|
## Podman
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -ti --name local-ai -p 8081:8080 --device=/dev/kfd --device=/dev/dri --group-add=video --replace localai/localai:latest-gpu-vulkan
|
||||||
|
```
|
||||||
|
|
||||||
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -124,7 +131,7 @@ pipx install "huggingface_hub[cli]"
|
|||||||
podman network create --ipv6 --label local-ai local-ai
|
podman network create --ipv6 --label local-ai local-ai
|
||||||
|
|
||||||
# You might want to mount an external drive here.
|
# You might want to mount an external drive here.
|
||||||
export MODEL_DIR=/models
|
export MODEL_DIR=/srv/models
|
||||||
mkdir -p $MODEL_DIR
|
mkdir -p $MODEL_DIR
|
||||||
|
|
||||||
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
||||||
@@ -136,14 +143,13 @@ mkdir -p $MODEL_DIR
|
|||||||
podman run \
|
podman run \
|
||||||
-d \
|
-d \
|
||||||
-p 8080:8080 \
|
-p 8080:8080 \
|
||||||
-e LOCALAI_API_KEY=$(cat ~/.localai/token) \
|
|
||||||
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
||||||
--device /dev/dri \
|
--device /dev/dri \
|
||||||
--device /dev/kfd \
|
--device /dev/kfd \
|
||||||
--name local-ai \
|
--name local-ai \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
|
|
||||||
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
||||||
@@ -153,9 +159,9 @@ podman run \
|
|||||||
-d \
|
-d \
|
||||||
-p 8081:8080 \
|
-p 8081:8080 \
|
||||||
--name local-ai-webui \
|
--name local-ai-webui \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
```
|
```
|
||||||
|
|
||||||
3
active/container_matrix/compose/README.md
Normal file
3
active/container_matrix/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
@@ -18,7 +18,7 @@
|
|||||||
|
|
||||||
## Setup matrix Project
|
## Setup matrix Project
|
||||||
|
|
||||||
- [x] Copy and rename this folder to active/podman_matrix
|
- [x] Copy and rename this folder to active/container_matrix
|
||||||
- [x] Find and replace matrix with the name of the service.
|
- [x] Find and replace matrix with the name of the service.
|
||||||
- [x] Create the rootless user to run the podman containers
|
- [x] Create the rootless user to run the podman containers
|
||||||
- [x] Write the compose.yaml spec for your service
|
- [x] Write the compose.yaml spec for your service
|
||||||
@@ -57,8 +57,8 @@ On your local machine:
|
|||||||
podman run \
|
podman run \
|
||||||
--security-opt label=disable \
|
--security-opt label=disable \
|
||||||
--rm \
|
--rm \
|
||||||
-v $(pwd)/active/podman_matrix/compose:/compose \
|
-v $(pwd)/active/container_matrix/compose:/compose \
|
||||||
-v $(pwd)/active/podman_matrix/quadlets:/quadlets \
|
-v $(pwd)/active/container_matrix/quadlets:/quadlets \
|
||||||
quay.io/k9withabone/podlet \
|
quay.io/k9withabone/podlet \
|
||||||
-f /quadlets \
|
-f /quadlets \
|
||||||
-i \
|
-i \
|
||||||
@@ -66,10 +66,10 @@ quay.io/k9withabone/podlet \
|
|||||||
compose /compose/compose.yaml
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
# Copy the files to the server
|
# Copy the files to the server
|
||||||
scp -r active/podman_matrix/quadlets/. matrix:~/.config/containers/systemd/
|
scp -r active/container_matrix/quadlets/. matrix:~/.config/containers/systemd/
|
||||||
|
|
||||||
# Copy the compose files to the server
|
# Copy the compose files to the server
|
||||||
scp -r active/podman_matrix/compose/. matrix:~/.config//
|
scp -r active/container_matrix/compose/. matrix:~/.config//
|
||||||
```
|
```
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -96,8 +96,8 @@ ghcr.io/matrix-construct/tuwunel:latest \
|
|||||||
|
|
||||||
### Expose matrix
|
### Expose matrix
|
||||||
|
|
||||||
1. If you need a domain, follow the [DDNS instructions](/active/podman_ddns/ddns.md#install-a-new-ddns-service)
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
2. For a web service, follow the [Caddy instructions](/active/podman_caddy/caddy.md#adding-a-new-caddy-record)
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
#### firewalld
|
#### firewalld
|
||||||
3
active/container_matrix/quadlets/README.md
Normal file
3
active/container_matrix/quadlets/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Quadlets
|
||||||
|
|
||||||
|
Put your quadlets here.
|
||||||
25
active/container_minecraft/minecraft-compose.yaml
Normal file
25
active/container_minecraft/minecraft-compose.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
services:
|
||||||
|
testcraft:
|
||||||
|
image: gitea.reeseapps.com/services/minecraft:latest
|
||||||
|
stdin_open: true
|
||||||
|
tty: true
|
||||||
|
volumes:
|
||||||
|
- /srv/minecraft/testcraft:/mc_data
|
||||||
|
ports:
|
||||||
|
- 25565:25565
|
||||||
|
environment:
|
||||||
|
- MAX_RAM=4
|
||||||
|
- MIN_RAM=1
|
||||||
|
restart: always
|
||||||
|
nimcraft:
|
||||||
|
image: gitea.reeseapps.com/services/minecraft:latest
|
||||||
|
stdin_open: true
|
||||||
|
tty: true
|
||||||
|
volumes:
|
||||||
|
- /srv/minecraft/nimcraft:/mc_data
|
||||||
|
ports:
|
||||||
|
- 25566:25565
|
||||||
|
environment:
|
||||||
|
- MAX_RAM=4
|
||||||
|
- MIN_RAM=1
|
||||||
|
restart: always
|
||||||
@@ -34,8 +34,8 @@ podman run \
|
|||||||
--security-opt label=disable \
|
--security-opt label=disable \
|
||||||
--userns keep-id \
|
--userns keep-id \
|
||||||
--rm \
|
--rm \
|
||||||
-v $(pwd)/active/podman_minecraft:/compose \
|
-v $(pwd)/active/container_minecraft:/compose \
|
||||||
-v $(pwd)/active/podman_minecraft/quadlets:/quadlets \
|
-v $(pwd)/active/container_minecraft/quadlets:/quadlets \
|
||||||
quay.io/k9withabone/podlet \
|
quay.io/k9withabone/podlet \
|
||||||
-f /quadlets \
|
-f /quadlets \
|
||||||
-i \
|
-i \
|
||||||
@@ -43,7 +43,7 @@ quay.io/k9withabone/podlet \
|
|||||||
compose /compose/compose.yaml
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
# Copy the files to the server
|
# Copy the files to the server
|
||||||
scp -r active/podman_minecraft/quadlets/. minecraft:~/.config/containers/systemd/
|
scp -r active/container_minecraft/quadlets/. minecraft:~/.config/containers/systemd/
|
||||||
```
|
```
|
||||||
|
|
||||||
### Install Quadlets
|
### Install Quadlets
|
||||||
@@ -78,10 +78,10 @@ ssh minecraft systemctl --user restart minecraft
|
|||||||
|
|
||||||
## Expose minecraft
|
## Expose minecraft
|
||||||
|
|
||||||
1. Create your minecraft ddns record first [following these docs](/active/podman_ddns/ddns.md#)
|
1. Create your minecraft ddns record first [following these docs](/active/container_ddns/ddns.md#)
|
||||||
2. Create a SRV record in your DNS provider like the following:
|
2. Create a SRV record in your DNS provider like the following:
|
||||||
|
|
||||||
active/podman_minecraft/secrets/reeseapps_records.json:
|
active/container_minecraft/secrets/reeseapps_records.json:
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -105,7 +105,7 @@ ssh minecraft systemctl --user restart minecraft
|
|||||||
```
|
```
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/podman_minecraft/secrets/reeseapps_records.json
|
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/container_minecraft/secrets/reeseapps_records.json
|
||||||
```
|
```
|
||||||
|
|
||||||
3. Test your record with `nslookup`
|
3. Test your record with `nslookup`
|
||||||
@@ -1,12 +1,14 @@
|
|||||||
# Nextcloud AIO
|
# Nextcloud AIO
|
||||||
|
|
||||||
- [Nextcloud AIO](#nextcloud-aio)
|
- [Nextcloud AIO](#nextcloud-aio)
|
||||||
|
- [Recommended Install](#recommended-install)
|
||||||
- [Install with Rootless Podman](#install-with-rootless-podman)
|
- [Install with Rootless Podman](#install-with-rootless-podman)
|
||||||
- [Create the nextcloud user](#create-the-nextcloud-user)
|
- [Create the nextcloud user](#create-the-nextcloud-user)
|
||||||
- [Create the container autostart service](#create-the-container-autostart-service)
|
- [Create the container autostart service](#create-the-container-autostart-service)
|
||||||
- [Install Nextcloud](#install-nextcloud)
|
- [Install Nextcloud](#install-nextcloud)
|
||||||
- [Install Caddy](#install-caddy)
|
- [Install Caddy](#install-caddy)
|
||||||
- [Firewall](#firewall)
|
- [Firewall](#firewall)
|
||||||
|
- [Install with Docker](#install-with-docker)
|
||||||
- [Backups](#backups)
|
- [Backups](#backups)
|
||||||
- [Manual Backups](#manual-backups)
|
- [Manual Backups](#manual-backups)
|
||||||
- [Maintenance Mode](#maintenance-mode)
|
- [Maintenance Mode](#maintenance-mode)
|
||||||
@@ -27,6 +29,16 @@
|
|||||||
|
|
||||||
<https://github.com/nextcloud/all-in-one>
|
<https://github.com/nextcloud/all-in-one>
|
||||||
|
|
||||||
|
## Recommended Install
|
||||||
|
|
||||||
|
<https://github.com/nextcloud/all-in-one#nextcloud-all-in-one>
|
||||||
|
|
||||||
|
1. Create Fedora VM
|
||||||
|
2. [Install Docker](https://docs.docker.com/engine/install/fedora/)
|
||||||
|
3. Create and mount a directory at `/srv/nextcloud-data`
|
||||||
|
4. `scp active/container_nextcloud/nextcloud-compose.yaml nextcloud:`
|
||||||
|
5. `docker compose -f nextcloud-compose.yaml up -d`
|
||||||
|
|
||||||
## Install with Rootless Podman
|
## Install with Rootless Podman
|
||||||
|
|
||||||
Roughly taken from <https://github.com/nextcloud/all-in-one/discussions/3487>
|
Roughly taken from <https://github.com/nextcloud/all-in-one/discussions/3487>
|
||||||
@@ -75,7 +87,7 @@ On the operator
|
|||||||
```bash
|
```bash
|
||||||
# Copy the quadlet files
|
# Copy the quadlet files
|
||||||
scp \
|
scp \
|
||||||
active/podman_nextcloud/nextcloud-aio-mastercontainer.container \
|
active/container_nextcloud/nextcloud-aio-mastercontainer.container \
|
||||||
3dserver:/home/nextcloud/.config/containers/systemd/
|
3dserver:/home/nextcloud/.config/containers/systemd/
|
||||||
|
|
||||||
ssh chown -R nextcloud:nextcloud /home/nextcloud/.config/containers/systemd/
|
ssh chown -R nextcloud:nextcloud /home/nextcloud/.config/containers/systemd/
|
||||||
@@ -158,6 +170,25 @@ systemctl start caddy
|
|||||||
|
|
||||||
Allow traffic to 11000 from your reverse proxy
|
Allow traffic to 11000 from your reverse proxy
|
||||||
|
|
||||||
|
## Install with Docker
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# For Linux and without a web server or reverse proxy already in place:
|
||||||
|
sudo docker run \
|
||||||
|
--init \
|
||||||
|
--sig-proxy=false \
|
||||||
|
--name nextcloud-aio-mastercontainer \
|
||||||
|
--restart always \
|
||||||
|
--publish 8080:8080 \
|
||||||
|
--env APACHE_PORT=11000 \
|
||||||
|
--env APACHE_IP_BINDING=0.0.0.0 \
|
||||||
|
--env APACHE_ADDITIONAL_NETWORK="" \
|
||||||
|
--env SKIP_DOMAIN_VALIDATION=false \
|
||||||
|
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
|
||||||
|
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
|
||||||
|
ghcr.io/nextcloud-releases/all-in-one:latest
|
||||||
|
```
|
||||||
|
|
||||||
## Backups
|
## Backups
|
||||||
|
|
||||||
IMPORTANT: you will need both KEY AND PASSPHRASE to access this repo!
|
IMPORTANT: you will need both KEY AND PASSPHRASE to access this repo!
|
||||||
42
active/container_nextcloud/nextcloud-compose.yaml
Normal file
42
active/container_nextcloud/nextcloud-compose.yaml
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
name: nextcloud-aio # Add the container to the same compose project like all the sibling containers are added to automatically.
|
||||||
|
services:
|
||||||
|
nextcloud-aio-mastercontainer:
|
||||||
|
image: ghcr.io/nextcloud-releases/all-in-one:latest # This is the container image used. You can switch to ghcr.io/nextcloud-releases/all-in-one:beta if you want to help testing new releases. See https://github.com/nextcloud/all-in-one#how-to-switch-the-channel
|
||||||
|
init: true # This setting makes sure that signals from main process inside the container are correctly forwarded to children. See https://docs.docker.com/reference/compose-file/services/#init
|
||||||
|
restart: always # This makes sure that the container starts always together with the host OS. See https://docs.docker.com/reference/compose-file/services/#restart
|
||||||
|
container_name: nextcloud-aio-mastercontainer # This line is not allowed to be changed as otherwise AIO will not work correctly
|
||||||
|
volumes:
|
||||||
|
- nextcloud_aio_mastercontainer:/mnt/docker-aio-config # This line is not allowed to be changed as otherwise the built-in backup solution will not work
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock:ro # May be changed on macOS, Windows or docker rootless. See the applicable documentation. If adjusting, don't forget to also set 'WATCHTOWER_DOCKER_SOCKET_PATH'!
|
||||||
|
network_mode: bridge # This adds the container to the same network as docker run would do. Comment this line and uncomment the line below and the networks section at the end of the file if you want to define a custom MTU size for the docker network
|
||||||
|
ports:
|
||||||
|
- 8080:8080 # This is the AIO interface, served via https and self-signed certificate. See https://github.com/nextcloud/all-in-one#explanation-of-used-ports
|
||||||
|
security_opt: ["label:disable"] # Is needed when using SELinux. See https://github.com/nextcloud/all-in-one#are-there-known-problems-when-selinux-is-enabled
|
||||||
|
environment: # Is needed when using any of the options below
|
||||||
|
# AIO_DISABLE_BACKUP_SECTION: false # Setting this to true allows to hide the backup section in the AIO interface. See https://github.com/nextcloud/all-in-one#how-to-disable-the-backup-section
|
||||||
|
APACHE_PORT: 11000 # Is needed when running behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else). See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||||
|
APACHE_IP_BINDING: 0.0.0.0 # Should be set when running behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else) that is running on the same host. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||||
|
# APACHE_ADDITIONAL_NETWORK: frontend_net # (Optional) Connect the apache container to an additional docker network. Needed when behind a web server or reverse proxy (like Apache, Nginx, Caddy, Cloudflare Tunnel and else) running in a different docker network on same server. See https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md
|
||||||
|
# BORG_RETENTION_POLICY: --keep-within=7d --keep-weekly=4 --keep-monthly=6 # Allows to adjust borgs retention policy. See https://github.com/nextcloud/all-in-one#how-to-adjust-borgs-retention-policy
|
||||||
|
# COLLABORA_SECCOMP_DISABLED: false # Setting this to true allows to disable Collabora's Seccomp feature. See https://github.com/nextcloud/all-in-one#how-to-disable-collaboras-seccomp-feature
|
||||||
|
# DOCKER_API_VERSION: 1.44 # You can adjust the internally used docker api version with this variable. ⚠️⚠️⚠️ Warning: please note that only the default api version (unset this variable) is supported and tested by the maintainers of Nextcloud AIO. So use this on your own risk and things might break without warning. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-internally-used-docker-api-version
|
||||||
|
# FULLTEXTSEARCH_JAVA_OPTIONS: "-Xms1024M -Xmx1024M" # Allows to adjust the fulltextsearch java options. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-fulltextsearch-java-options
|
||||||
|
NEXTCLOUD_DATADIR: /srv/nextcloud-data # Allows to set the host directory for Nextcloud's datadir. ⚠️⚠️⚠️ Warning: do not set or adjust this value after the initial Nextcloud installation is done! See https://github.com/nextcloud/all-in-one#how-to-change-the-default-location-of-nextclouds-datadir
|
||||||
|
# NEXTCLOUD_MOUNT: /mnt/ # Allows the Nextcloud container to access the chosen directory on the host. See https://github.com/nextcloud/all-in-one#how-to-allow-the-nextcloud-container-to-access-directories-on-the-host
|
||||||
|
NEXTCLOUD_UPLOAD_LIMIT: 128G # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-upload-limit-for-nextcloud
|
||||||
|
NEXTCLOUD_MAX_TIME: 3600 # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-max-execution-time-for-nextcloud
|
||||||
|
NEXTCLOUD_MEMORY_LIMIT: 1024M # Can be adjusted if you need more. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-php-memory-limit-for-nextcloud
|
||||||
|
# NEXTCLOUD_TRUSTED_CACERTS_DIR: /path/to/my/cacerts # CA certificates in this directory will be trusted by the OS of the nextcloud container (Useful e.g. for LDAPS) See https://github.com/nextcloud/all-in-one#how-to-trust-user-defined-certification-authorities-ca
|
||||||
|
# NEXTCLOUD_STARTUP_APPS: deck twofactor_totp tasks calendar contacts notes # Allows to modify the Nextcloud apps that are installed on starting AIO the first time. See https://github.com/nextcloud/all-in-one#how-to-change-the-nextcloud-apps-that-are-installed-on-the-first-startup
|
||||||
|
# NEXTCLOUD_ADDITIONAL_APKS: imagemagick # This allows to add additional packages to the Nextcloud container permanently. Default is imagemagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-os-packages-permanently-to-the-nextcloud-container
|
||||||
|
# NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS: imagick # This allows to add additional php extensions to the Nextcloud container permanently. Default is imagick but can be overwritten by modifying this value. See https://github.com/nextcloud/all-in-one#how-to-add-php-extensions-permanently-to-the-nextcloud-container
|
||||||
|
# NEXTCLOUD_ENABLE_DRI_DEVICE: true # This allows to enable the /dev/dri device for containers that profit from it. ⚠️⚠️⚠️ Warning: this only works if the '/dev/dri' device is present on the host! If it should not exist on your host, don't set this to true as otherwise the Nextcloud container will fail to start! See https://github.com/nextcloud/all-in-one#how-to-enable-hardware-acceleration-for-nextcloud
|
||||||
|
# NEXTCLOUD_ENABLE_NVIDIA_GPU: true # This allows to enable the NVIDIA runtime and GPU access for containers that profit from it. ⚠️⚠️⚠️ Warning: this only works if an NVIDIA gpu is installed on the server. See https://github.com/nextcloud/all-in-one#how-to-enable-hardware-acceleration-for-nextcloud.
|
||||||
|
# NEXTCLOUD_KEEP_DISABLED_APPS: false # Setting this to true will keep Nextcloud apps that are disabled in the AIO interface and not uninstall them if they should be installed. See https://github.com/nextcloud/all-in-one#how-to-keep-disabled-apps
|
||||||
|
# SKIP_DOMAIN_VALIDATION: false # This should only be set to true if things are correctly configured. See https://github.com/nextcloud/all-in-one#how-to-skip-the-domain-validation
|
||||||
|
# TALK_PORT: 3478 # This allows to adjust the port that the talk container is using which is exposed on the host. See https://github.com/nextcloud/all-in-one#how-to-adjust-the-talk-port
|
||||||
|
# WATCHTOWER_DOCKER_SOCKET_PATH: /var/run/docker.sock # Needs to be specified if the docker socket on the host is not located in the default '/var/run/docker.sock'. Otherwise mastercontainer updates will fail. For macos it needs to be '/var/run/docker.sock'
|
||||||
|
|
||||||
|
volumes: # If you want to store the data on a different drive, see https://github.com/nextcloud/all-in-one#how-to-store-the-filesinstallation-on-a-separate-drive
|
||||||
|
nextcloud_aio_mastercontainer:
|
||||||
|
name: nextcloud_aio_mastercontainer # This line is not allowed to be changed as otherwise the built-in backup solution will not work
|
||||||
81
active/container_nginx/nginx.md
Normal file
81
active/container_nginx/nginx.md
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
# Ngnix
|
||||||
|
|
||||||
|
## Initial Install
|
||||||
|
|
||||||
|
Create your initial `secrets/nginx.conf` to look something like:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
user nginx;
|
||||||
|
worker_processes auto;
|
||||||
|
|
||||||
|
error_log /var/log/nginx/error.log notice;
|
||||||
|
pid /var/run/nginx.pid;
|
||||||
|
|
||||||
|
events {
|
||||||
|
worker_connections 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
stream {
|
||||||
|
log_format stream_logs '$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time "$upstream_addr"';
|
||||||
|
|
||||||
|
access_log /dev/stdout stream_logs;
|
||||||
|
error_log stderr info;
|
||||||
|
|
||||||
|
server {
|
||||||
|
listen 25565;
|
||||||
|
listen [::]:25565;
|
||||||
|
proxy_pass my-minecraft-server.internal.dns:25565;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Create the systemd service:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Get the initial configuration
|
||||||
|
vim /etc/containers/systemd/nginx.container
|
||||||
|
```
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[Unit]
|
||||||
|
Description=Nginx
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
AddCapability=NET_ADMIN
|
||||||
|
ContainerName=nginx
|
||||||
|
Image=docker.io/nginx
|
||||||
|
Network=host
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/etc/nginx:/etc/nginx
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
Reload the service and start it:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl start nginx
|
||||||
|
```
|
||||||
|
|
||||||
|
## Update the Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_nginx/secrets/nginx.conf proxy:/etc/nginx/nginx.conf
|
||||||
|
ssh proxy
|
||||||
|
systemctl restart nginx
|
||||||
|
```
|
||||||
|
|
||||||
|
## Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Watch client connections
|
||||||
|
journalctl -u nginx -f | grep -e 'client .* connected'
|
||||||
|
|
||||||
|
# Watch upstream proxy connections
|
||||||
|
journalctl -u nginx -f | grep -e 'proxy .* connected'
|
||||||
|
```
|
||||||
@@ -84,11 +84,12 @@ pgrep ollama | xargs -I '%' sh -c 'kill %'
|
|||||||
## Run Anything LLM Interface
|
## Run Anything LLM Interface
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
podman run \
|
docker run \
|
||||||
-d \
|
-d \
|
||||||
-p 3001:3001 \
|
-p 3001:3001 \
|
||||||
--name anything-llm \
|
--name anything-llm \
|
||||||
--cap-add SYS_ADMIN \
|
--cap-add SYS_ADMIN \
|
||||||
|
--restart always \
|
||||||
-v anything-llm:/app/server \
|
-v anything-llm:/app/server \
|
||||||
-e STORAGE_DIR="/app/server/storage" \
|
-e STORAGE_DIR="/app/server/storage" \
|
||||||
docker.io/mintplexlabs/anythingllm
|
docker.io/mintplexlabs/anythingllm
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user