Compare commits
197 Commits
7a2589d01f
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
acf6421b53
|
|||
|
875795a409
|
|||
|
b9d1c2a9a3
|
|||
|
6f8b7ffca6
|
|||
|
cc75227a77
|
|||
|
9ae82fc3de
|
|||
|
92edf49948
|
|||
|
25d3a7805c
|
|||
|
eb67191706
|
|||
|
d51560f979
|
|||
|
88ecb458e1
|
|||
|
31739320aa
|
|||
|
f70028cf63
|
|||
|
ecf4ae1058
|
|||
|
eff2aa4066
|
|||
|
a53e67653d
|
|||
|
d48b9a66cb
|
|||
|
2c5af8507c
|
|||
|
ba66c47719
|
|||
|
da0b06768e
|
|||
|
1c6e1b7032
|
|||
|
087d8888cf
|
|||
|
cb486ae289
|
|||
|
cd56318ab0
|
|||
|
416321206d
|
|||
|
f3c313e610
|
|||
|
52c6dac263
|
|||
|
d4fbbb185f
|
|||
|
7d2e8b6b7b
|
|||
|
3bfa67e605
|
|||
|
d929ac8888
|
|||
|
837ea91a52
|
|||
|
71a27b1b91
|
|||
|
8d250318b1
|
|||
|
041fbd0f5f
|
|||
|
498e52c134
|
|||
|
dd11ef60cb
|
|||
|
65208987ea
|
|||
|
f530181e73
|
|||
|
bcd087dc94
|
|||
|
59bdafed6a
|
|||
|
2a7a177cec
|
|||
|
fdc0f0f2c5
|
|||
|
27180d92b1
|
|||
|
2b62ad0956
|
|||
|
fb19c81d47
|
|||
|
1d39fb4dc6
|
|||
|
03c8e95275
|
|||
|
90105f8997
|
|||
|
c1e083ec04
|
|||
|
5b79da95fd
|
|||
|
1737c7a79a
|
|||
|
8ea3fe5fc4
|
|||
|
73d78db715
|
|||
|
e7835970a2
|
|||
|
f242895b51
|
|||
|
70718f209b
|
|||
|
d089c981c8
|
|||
|
495cacfb96
|
|||
|
90ba436a47
|
|||
|
3f2ac15591
|
|||
|
2559aebd5d
|
|||
|
ddb9720800
|
|||
|
525e14965d
|
|||
|
7626cdf998
|
|||
|
40f221376f
|
|||
|
27e9c42d4c
|
|||
|
43159dca7e
|
|||
|
57dcd6a806
|
|||
|
463595af75
|
|||
|
d4571c9b70
|
|||
|
4c0a263d50
|
|||
|
70259d9542
|
|||
|
4f3102a2ff
|
|||
|
ed65f8924d
|
|||
|
737a58a13c
|
|||
|
a2cef18efe
|
|||
|
1c245a593a
|
|||
|
b65ef9cbb7
|
|||
|
ea3e8f9c10
|
|||
|
b5aecf1565
|
|||
|
380d8f8e48
|
|||
|
07a297f818
|
|||
|
d7224b038b
|
|||
|
fc62219db7
|
|||
|
2ebd97c345
|
|||
|
270e86bfd0
|
|||
|
7305e3a35b
|
|||
|
aabbd8286f
|
|||
|
37f7d442a1
|
|||
|
3ff805fa39
|
|||
|
1ae62e70ed
|
|||
|
91f4687c07
|
|||
|
dc2df62d04
|
|||
|
b75aac76c2
|
|||
|
5161dced6e
|
|||
|
d9ed144578
|
|||
|
5516f9530b
|
|||
|
621be95870
|
|||
|
b526901546
|
|||
|
b328081b59
|
|||
|
113b859927
|
|||
|
57ff005186
|
|||
|
7ccedb9768
|
|||
|
ef527abef4
|
|||
|
75f4aaebf1
|
|||
|
1396e09227
|
|||
|
cbe8c4a369
|
|||
|
2f88c75655
|
|||
|
0f4b73720c
|
|||
|
b97f41eb70
|
|||
|
6df02e8dff
|
|||
|
57ae6b7e72
|
|||
|
e3ba1759c4
|
|||
|
af70d1d396
|
|||
|
5b474c7190
|
|||
|
d94cd01008
|
|||
|
afb27c512c
|
|||
|
a500c8a572
|
|||
|
c5748d81da
|
|||
|
b38390029f
|
|||
|
b116ea73ec
|
|||
|
920aeef7f3
|
|||
|
9038962f29
|
|||
|
3fed164193
|
|||
|
487e03c0bd
|
|||
|
cf0a7373d4
|
|||
|
e0adee5362
|
|||
|
8f3e624925
|
|||
| e1e551c5cc | |||
| 23d3949421 | |||
| 714dd32ff6 | |||
| 8035fa38dc | |||
| b91cc1adc3 | |||
| 4fe56de990 | |||
| 9ef631b266 | |||
| 8c39f749c7 | |||
| 1361c726d9 | |||
| 1879158b6c | |||
| 7b9968762a | |||
| 250ffeb266 | |||
| de6c1941c5 | |||
| 9bc09a4b98 | |||
| 79377b3653 | |||
| d44bca3f2b | |||
| 660735f0ae | |||
| 6dfd30e175 | |||
| 0e5250d84d | |||
| 556149c583 | |||
| 72e13f53aa | |||
| e9c68abeb9 | |||
| 69e8e89e72 | |||
| 85e74541c2 | |||
| cb66fb6195 | |||
| 8d98cd06fa | |||
| a85627b3b2 | |||
| f046e6edc2 | |||
| a32f055ede | |||
| 0c6509cc17 | |||
| 82b60c086c | |||
| 999869cab6 | |||
| 548cdc8b87 | |||
| 4832b283bb | |||
| 9e83048248 | |||
| f2d684fa7c | |||
| 7980bfb381 | |||
| 20690c48e5 | |||
| ca582333f1 | |||
| dae4063f25 | |||
| 5184c84d50 | |||
| 3f3a03ee05 | |||
| 22c1d635c6 | |||
| 5512c266eb | |||
| de8b827cfb | |||
| 7b93f740ec | |||
| b3e4a45996 | |||
| ab2b033c54 | |||
|
|
c2fa408c1e | ||
|
|
a469444811 | ||
| ed2088d0dc | |||
| 7099e72d6f | |||
| d44773389e | |||
| 03e959c215 | |||
| b45bcd802e | |||
| 38b81fda9a | |||
| 960e91f911 | |||
| 4723ffb13d | |||
| ef9104c796 | |||
| 6e393d90ee | |||
| 9acff25d43 | |||
| 3752f9da61 | |||
| 9417e711a9 | |||
| 9a3382862d | |||
| 237a906b68 | |||
| 85de8a54d9 | |||
| e30db947b0 | |||
| 3865e64b19 |
30
.gitea/workflows/caddy.yaml
Normal file
30
.gitea/workflows/caddy.yaml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Podman DDNS Image
|
||||||
|
run-name: Build and Push the Custom Caddy Image with Route53 DNS Certbot
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- active/container_caddy/**
|
||||||
|
- .gitea/workflows/caddy.yaml
|
||||||
|
schedule:
|
||||||
|
- cron: "@daily"
|
||||||
|
jobs:
|
||||||
|
build-and-push-ddns:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: gitea.ref == 'refs/heads/main'
|
||||||
|
steps:
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Login to Gitea Registry
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
registry: gitea.reeseapps.com
|
||||||
|
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||||
|
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||||
|
- name: Build and push Docker image
|
||||||
|
uses: https://github.com/docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: ${{ gitea.workspace }}/active/container_caddy
|
||||||
|
file: ${{ gitea.workspace }}/active/container_caddy/Containerfile
|
||||||
|
push: true
|
||||||
|
tags: "gitea.reeseapps.com/services/caddy:latest,gitea.reeseapps.com/services/caddy:${{gitea.sha}}"
|
||||||
|
no-cache: true
|
||||||
30
.gitea/workflows/ddns.yaml
Normal file
30
.gitea/workflows/ddns.yaml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: Podman DDNS Image
|
||||||
|
run-name: Build and Push the Podman DDNS Image
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
paths:
|
||||||
|
- active/container_ddns/**
|
||||||
|
- .gitea/workflows/ddns.yaml
|
||||||
|
schedule:
|
||||||
|
- cron: "@daily"
|
||||||
|
jobs:
|
||||||
|
build-and-push-ddns:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: gitea.ref == 'refs/heads/main'
|
||||||
|
steps:
|
||||||
|
- name: Check out repository code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: Login to Gitea Registry
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
registry: gitea.reeseapps.com
|
||||||
|
username: ${{ secrets.REGISTRY_USERNAME }}
|
||||||
|
password: ${{ secrets.REGISTRY_PASSWORD }}
|
||||||
|
- name: Build and push Docker image
|
||||||
|
uses: https://github.com/docker/build-push-action@v5
|
||||||
|
with:
|
||||||
|
context: ${{ gitea.workspace }}/active/container_ddns
|
||||||
|
file: ${{ gitea.workspace }}/active/container_ddns/Containerfile
|
||||||
|
push: true
|
||||||
|
tags: "gitea.reeseapps.com/services/ddns:latest,gitea.reeseapps.com/services/ddns:${{gitea.sha}}"
|
||||||
|
no-cache: true
|
||||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -3,3 +3,12 @@ venv/
|
|||||||
tmp/
|
tmp/
|
||||||
Unsorted/
|
Unsorted/
|
||||||
volumes/
|
volumes/
|
||||||
|
__pycache__/
|
||||||
|
.pytest_cache/
|
||||||
|
.venv/
|
||||||
|
.mypy_cache
|
||||||
|
TODO.md
|
||||||
|
eicar.com
|
||||||
|
*.pp
|
||||||
|
*.mod
|
||||||
|
*.log
|
||||||
1
.python-version
Normal file
1
.python-version
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3.13
|
||||||
34
.vscode/code_oss_extensions.txt
vendored
Normal file
34
.vscode/code_oss_extensions.txt
vendored
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
charliermarsh.ruff
|
||||||
|
eamodio.gitlens
|
||||||
|
franneck94.vscode-python-config
|
||||||
|
franneck94.vscode-python-dev-extension-pack
|
||||||
|
ms-pyright.pyright
|
||||||
|
ms-python.debugpy
|
||||||
|
ms-python.mypy-type-checker
|
||||||
|
ms-python.python
|
||||||
|
ms-python.vscode-python-envs
|
||||||
|
njpwerner.autodocstring
|
||||||
|
njqdev.vscode-python-typehint
|
||||||
|
redhat.vscode-yaml
|
||||||
|
stkb.rewrap
|
||||||
|
streetsidesoftware.code-spell-checker
|
||||||
|
tamasfe.even-better-toml
|
||||||
|
vue.volar
|
||||||
|
yzhang.markdown-all-in-onecharliermarsh.ruff
|
||||||
|
eamodio.gitlens
|
||||||
|
franneck94.vscode-python-config
|
||||||
|
franneck94.vscode-python-dev-extension-pack
|
||||||
|
hashicorp.hcl
|
||||||
|
ms-pyright.pyright
|
||||||
|
ms-python.debugpy
|
||||||
|
ms-python.mypy-type-checker
|
||||||
|
ms-python.python
|
||||||
|
ms-python.vscode-python-envs
|
||||||
|
njpwerner.autodocstring
|
||||||
|
njqdev.vscode-python-typehint
|
||||||
|
redhat.vscode-yaml
|
||||||
|
stkb.rewrap
|
||||||
|
streetsidesoftware.code-spell-checker
|
||||||
|
tamasfe.even-better-toml
|
||||||
|
vue.volar
|
||||||
|
yzhang.markdown-all-in-one
|
||||||
86
.vscode/settings.json
vendored
Normal file
86
.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
{
|
||||||
|
"[css]": {
|
||||||
|
"editor.suggest.insertMode": "replace",
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[django-html]": {
|
||||||
|
"editor.insertSpaces": true,
|
||||||
|
"editor.quickSuggestions": {
|
||||||
|
"comments": true,
|
||||||
|
"other": true,
|
||||||
|
"strings": true
|
||||||
|
},
|
||||||
|
"editor.suggest.insertMode": "replace",
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[dockercompose]": {
|
||||||
|
"breadcrumbs.showConstants": true,
|
||||||
|
"editor.quickSuggestions": {
|
||||||
|
"comments": false,
|
||||||
|
"other": true,
|
||||||
|
"strings": true
|
||||||
|
},
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[helm]": {
|
||||||
|
"editor.insertSpaces": true,
|
||||||
|
"editor.tabSize": 2,
|
||||||
|
"rewrap.autoWrap.enabled": true,
|
||||||
|
"rewrap.wholeComment": true,
|
||||||
|
"rewrap.wrappingColumn": 73
|
||||||
|
},
|
||||||
|
"[html]": {
|
||||||
|
"editor.insertSpaces": true,
|
||||||
|
"editor.suggest.insertMode": "replace",
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[javascript]": {
|
||||||
|
"editor.maxTokenizationLineLength": 2500,
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[markdown]": {
|
||||||
|
"editor.defaultFormatter": "yzhang.markdown-all-in-one",
|
||||||
|
"editor.quickSuggestions": {
|
||||||
|
"comments": "off",
|
||||||
|
"other": "off",
|
||||||
|
"strings": "off"
|
||||||
|
},
|
||||||
|
"editor.tabSize": 4,
|
||||||
|
"editor.wordWrap": "off"
|
||||||
|
},
|
||||||
|
"[python]": {
|
||||||
|
"editor.codeActionsOnSave": {
|
||||||
|
"source.organizeImports": "always"
|
||||||
|
},
|
||||||
|
"editor.defaultFormatter": "charliermarsh.ruff",
|
||||||
|
"editor.formatOnSave": true,
|
||||||
|
"editor.formatOnType": true
|
||||||
|
},
|
||||||
|
"[shellscript]": {
|
||||||
|
"editor.tabSize": 2,
|
||||||
|
"files.eol": "\n"
|
||||||
|
},
|
||||||
|
"[terraform]": {
|
||||||
|
"editor.insertSpaces": true,
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[typescript]": {
|
||||||
|
"editor.maxTokenizationLineLength": 2500,
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"[vue]": {
|
||||||
|
"editor.insertSpaces": true,
|
||||||
|
"editor.tabSize": 2,
|
||||||
|
"gitlens.codeLens.scopes": [
|
||||||
|
"document"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"[yaml]": {
|
||||||
|
"editor.tabSize": 2
|
||||||
|
},
|
||||||
|
"cSpell.userWords": [
|
||||||
|
"Kubernetes",
|
||||||
|
"clamd",
|
||||||
|
"rtype"
|
||||||
|
],
|
||||||
|
}
|
||||||
2
.vscode/tasks.json
vendored
2
.vscode/tasks.json
vendored
@@ -6,7 +6,7 @@
|
|||||||
{
|
{
|
||||||
"label": "Build arch-toolbox",
|
"label": "Build arch-toolbox",
|
||||||
"type": "shell",
|
"type": "shell",
|
||||||
"command": "./infrastructure/graduated/distoolbox/arch-build.sh",
|
"command": "./active/software_distoolbox/arch-build.sh",
|
||||||
"problemMatcher": [],
|
"problemMatcher": [],
|
||||||
"group": {
|
"group": {
|
||||||
"kind": "build",
|
"kind": "build",
|
||||||
|
|||||||
25
.vscode/vscode.md
vendored
25
.vscode/vscode.md
vendored
@@ -8,6 +8,7 @@
|
|||||||
- [Navigation](#navigation)
|
- [Navigation](#navigation)
|
||||||
- [Extensions](#extensions)
|
- [Extensions](#extensions)
|
||||||
- [Continue](#continue)
|
- [Continue](#continue)
|
||||||
|
- [Pylance Type Checking](#pylance-type-checking)
|
||||||
|
|
||||||
## Debugpy Snippet
|
## Debugpy Snippet
|
||||||
|
|
||||||
@@ -108,27 +109,19 @@ previous positions.
|
|||||||
To save a list of installed extensions run:
|
To save a list of installed extensions run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
code --list-extensions >> vscode_extensions.txt
|
code --list-extensions >> .vscode/code_oss_extensions.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
To install that list of extensions run:
|
To install that list of extensions run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cat vscode_extensions.txt | xargs -L 1 code --install-extension
|
cat .vscode/code_oss_extensions.txt | xargs -L 1 code --install-extension
|
||||||
```
|
```
|
||||||
|
|
||||||
## Continue
|
### Continue
|
||||||
|
|
||||||
```json
|
Continue -> Settings -> Help -> Quickstart
|
||||||
{
|
|
||||||
"models": [
|
### Pylance Type Checking
|
||||||
{
|
|
||||||
"title": "qwen2.5-coder:32b",
|
Settings -> `python.analysis.typeChecking`
|
||||||
"provider": "ollama",
|
|
||||||
"apiBase": "https://ollama.example.com",
|
|
||||||
"apiKey": "...",
|
|
||||||
"model": "qwen2.5-coder:32b"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
...
|
|
||||||
```
|
|
||||||
317
README.md
317
README.md
@@ -1,81 +1,281 @@
|
|||||||
# Homelab
|
# Homelab
|
||||||
|
|
||||||
A project to store homelab stuff.
|
Welcome to my homelab!
|
||||||
|
|
||||||
Just here for the Arch distoolbox?
|
This repo is an in-flux collection of my personal notes, docs, and tutorials of
|
||||||
|
things I find interesting and self-host.
|
||||||
|
|
||||||
[Arch Distoolbox](infrastructure/graduated/distoolbox/distoolbox.md)
|
Take a look around!
|
||||||
|
|
||||||

|
- "Active" projects (/active) are in use today and generally fall into these
|
||||||
|
categories:
|
||||||
|
- `aws_` is for aws notes
|
||||||
|
- `device_` is for hardware
|
||||||
|
- `kubernetes_` is for helm charts or other kubernetes hosted software
|
||||||
|
- `os_` is for operating system setup guides and notes
|
||||||
|
- `podman_` is for containerized projects
|
||||||
|
- `software_` is for cli tools, projects without a specific way to host them,
|
||||||
|
or other misfits
|
||||||
|
|
||||||
|
All active projects will have a markdown file named after the project. This is
|
||||||
|
for quick access via shortcuts like `ctrl + p` in vscode. For example, I want
|
||||||
|
to check my notes for `virsh` so I would type `ctrl + p` "virsh" to open
|
||||||
|
"virsh.md".
|
||||||
|
|
||||||
|
"Retired" projects (/retired) is a graveyard of things I didn't want to delete.
|
||||||
|
|
||||||
|
"Template" projects (/templates) are quick templates for creating new active
|
||||||
|
projects with sane defaults.
|
||||||
|
|
||||||
|
I keep my GPG and SSH keys in `keys` if you want to add those to your keyring
|
||||||
|
or give me access to your servers.
|
||||||
|
|
||||||
## Table of Contents
|
## Table of Contents
|
||||||
|
|
||||||
- [Homelab](#homelab)
|
- [Homelab](#homelab)
|
||||||
- [Table of Contents](#table-of-contents)
|
- [Table of Contents](#table-of-contents)
|
||||||
- [Fun Facts](#fun-facts)
|
- [Fun Facts](#fun-facts)
|
||||||
|
- [Keyboard Shortcuts](#keyboard-shortcuts)
|
||||||
|
- [inputrc](#inputrc)
|
||||||
|
- ["find ." shortcuts](#find--shortcuts)
|
||||||
|
- [tmux](#tmux)
|
||||||
|
- [bash](#bash)
|
||||||
|
- [Bulk File/Folder Renaming](#bulk-filefolder-renaming)
|
||||||
|
- [SSH Setup](#ssh-setup)
|
||||||
|
- [Git GPG Commit Signing](#git-gpg-commit-signing)
|
||||||
|
- [Important Dates and Times](#important-dates-and-times)
|
||||||
- [Project Lifecycle](#project-lifecycle)
|
- [Project Lifecycle](#project-lifecycle)
|
||||||
- [Supported Projects](#supported-projects)
|
- [Project Types](#project-types)
|
||||||
- [Graduation Requirements](#graduation-requirements)
|
- [Active Project Requirements](#active-project-requirements)
|
||||||
- [Retirement Requirements](#retirement-requirements)
|
- [Retirement Requirements](#retirement-requirements)
|
||||||
|
- [Project Structure](#project-structure)
|
||||||
|
- [Creating a Project](#creating-a-project)
|
||||||
- [Order of Operations](#order-of-operations)
|
- [Order of Operations](#order-of-operations)
|
||||||
|
|
||||||
## Fun Facts
|
## Fun Facts
|
||||||
|
|
||||||
|
### Keyboard Shortcuts
|
||||||
|
|
||||||
On linux, <kbd>ctrl</kbd>+<kbd>shift</kbd>+<kbd>u</kbd>, then, while holding
|
On linux, <kbd>ctrl</kbd>+<kbd>shift</kbd>+<kbd>u</kbd>, then, while holding
|
||||||
<kbd>ctrl</kbd>+<kbd>shift</kbd>, typing <kbd>b</kbd>+<kbd>0</kbd> will type a ° (degree) symbol. Also you
|
<kbd>ctrl</kbd>+<kbd>shift</kbd>, typing <kbd>b</kbd>+<kbd>0</kbd> will type a
|
||||||
can enter any unicode symbol this way.
|
° (degree) symbol. Also you can enter any unicode symbol this way.
|
||||||
|
|
||||||
|
In vim: `esc + o` will take you to the end of a file and insert a new line.
|
||||||
|
|
||||||
|
### inputrc
|
||||||
|
|
||||||
|
Add this to your `~/.inputrc` to allow ctrl + backspace to delete whole words.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
"\C-h": backward-kill-word
|
||||||
|
```
|
||||||
|
|
||||||
|
### "find ." shortcuts
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Change file mode for a bunch of directories
|
||||||
|
find . -type d -exec chmod 755 {} \;
|
||||||
|
```
|
||||||
|
|
||||||
|
### tmux
|
||||||
|
|
||||||
|
- Vertical: ctrl + b + "
|
||||||
|
- Horizontal: ctrl + b + %
|
||||||
|
- Event Horizontal Distribution: ctrl + b + alt + 1
|
||||||
|
- Even Vertical Distribution: ctrl + b + alt + 2
|
||||||
|
- Swap pane order: ctrl + b + : -> swap-pane -t 0
|
||||||
|
|
||||||
|
### bash
|
||||||
|
|
||||||
|
<https://tecadmin.net/bash-special-variables/>
|
||||||
|
|
||||||
|
Here are some handy references for default bash variables
|
||||||
|
|
||||||
|
```text
|
||||||
|
`$0` – The name of the script being executed.
|
||||||
|
`$1-$9` – The first nine command-line arguments.
|
||||||
|
`$#` – The number of command-line arguments.
|
||||||
|
`$*` – All command-line arguments as a single string.
|
||||||
|
`$@` – All command-line arguments as an array.
|
||||||
|
`$?` – The exit status of the last executed command.
|
||||||
|
`$$` – The process ID of the current shell.
|
||||||
|
`$!` – The process ID of the last background command.
|
||||||
|
`$-` – Shows the current shell options or flags.
|
||||||
|
```
|
||||||
|
|
||||||
|
And here are the meanings of the shell options
|
||||||
|
|
||||||
|
```text
|
||||||
|
h – Remember the location of commands as they are looked up
|
||||||
|
i – Interactive shell
|
||||||
|
m – Job control is enabled
|
||||||
|
B – Brace expansion is enabled
|
||||||
|
H – History substitution is enabled
|
||||||
|
```
|
||||||
|
|
||||||
|
So to check if you are in an interactive shell:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
[ $- == *i* ]] && Some command here
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bulk File/Folder Renaming
|
||||||
|
|
||||||
|
```bash
|
||||||
|
for change_dir in $(ls | grep 'podman_*'); do
|
||||||
|
new_name=$(echo $change_dir | sed 's/podman_/container_/g')
|
||||||
|
mv $change_dir $new_name`;
|
||||||
|
done
|
||||||
|
```
|
||||||
|
|
||||||
|
## SSH Setup
|
||||||
|
|
||||||
|
Generate a key (password protect it!)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pick one of the below key types
|
||||||
|
# ed25519
|
||||||
|
ssh-keygen -C ssh@ducoterra.net -t ed25519
|
||||||
|
# rsa 4096
|
||||||
|
ssh-keygen -C ssh@ducoterra.net -t rsa -b 4096
|
||||||
|
|
||||||
|
# Inspect a key
|
||||||
|
ssh-keygen -l -f ~/.ssh/id_rsa
|
||||||
|
|
||||||
|
# Change the password
|
||||||
|
ssh-keygen -p -f ~/.ssh/id_rsa
|
||||||
|
```
|
||||||
|
|
||||||
|
In your ~/.ssh/config, add the following line to set the default key
|
||||||
|
|
||||||
|
```conf
|
||||||
|
IdentityFile ~/.foo/identity
|
||||||
|
```
|
||||||
|
|
||||||
|
Then add a host to your local computer
|
||||||
|
|
||||||
|
```bash
|
||||||
|
Host <hostname>
|
||||||
|
Hostname <host.something.com or IP address>
|
||||||
|
User <remote user>
|
||||||
|
Port <remote port>
|
||||||
|
```
|
||||||
|
|
||||||
|
And copy the key to a remote computer
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy the generated key to the server using password auth. Assumes password auth enabled.
|
||||||
|
ssh-copy-id -f -i ~/.ssh/id_ed25519 ${REMOTE_USER}@${REMOTE_HOST}
|
||||||
|
|
||||||
|
# Log into the server with your key
|
||||||
|
ssh -i ${KEY_NAME} ${REMOTE_HOST}
|
||||||
|
# Copy authorized_keys to root
|
||||||
|
sudo mkdir -p /root/.ssh
|
||||||
|
sudo cp ~/.ssh/authorized_keys /root/.ssh/authorized_keys
|
||||||
|
exit
|
||||||
|
|
||||||
|
# login and disable password auth
|
||||||
|
ssh ${REMOTE_HOST}
|
||||||
|
mkdir -p /etc/ssh/sshd_config.d
|
||||||
|
echo "PasswordAuthentication no" > /etc/ssh/sshd_config.d/01-prohibit-password.conf
|
||||||
|
systemctl restart sshd
|
||||||
|
|
||||||
|
# OPTIONAL: Disable sudo password
|
||||||
|
echo '%wheel ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/01-nopasswd-wheel
|
||||||
|
|
||||||
|
exit
|
||||||
|
|
||||||
|
# Test if you can SSH with a password
|
||||||
|
ssh -o PubkeyAuthentication=no ducoterra@${SSH_HOST}.reeselink.com
|
||||||
|
|
||||||
|
# Test that you can log into the server with ssh config
|
||||||
|
ssh $SSH_HOST
|
||||||
|
```
|
||||||
|
|
||||||
|
## Git GPG Commit Signing
|
||||||
|
|
||||||
|
1. Use `gpg --list-key 'git@ducoterra.net'` to find your key
|
||||||
|
2. Use `git config --global user.signingkey 0A46826A...` to set the signing key
|
||||||
|
3. Use `gpg --export -a 'git@ducoterra.net'` to export the key to copy into Gitea/Github/Gitlab
|
||||||
|
|
||||||
|
Now you can sign commits with `git commit -S`.
|
||||||
|
|
||||||
|
Alternatively, you can sign every commit by default with `git config --global commit.gpgsign true`.
|
||||||
|
|
||||||
|
You can verify a commit with `git verify-commit e1e551c`. If the commit is
|
||||||
|
signed you'll see an output. If not, nothing will show.
|
||||||
|
|
||||||
|
## Important Dates and Times
|
||||||
|
|
||||||
|
| Time | Day | Description |
|
||||||
|
| ----- | --- | ---------------------------------- |
|
||||||
|
| 00:00 | All | Automated builds |
|
||||||
|
| 00:00 | All | NAS Snapshots |
|
||||||
|
| 02:00 | All | Backups |
|
||||||
|
| 04:00 | All | Bare Metal Server Security Updates |
|
||||||
|
| 05:00 | All | VM Server Security Updates |
|
||||||
|
| 05:00 | All | Unifi Protect Firmware Updates |
|
||||||
|
| 06:00 | All | Unifi Network Firmware Updates |
|
||||||
|
|
||||||
## Project Lifecycle
|
## Project Lifecycle
|
||||||
|
|
||||||
Projects will fall into one of the three following categories:
|
Projects will either be `active` or `retired`.
|
||||||
|
|
||||||
1. Incubating
|
Active projects are being actively developed. They are in-use, stable, and
|
||||||
2. Graduated
|
production ready. Active projects should meet and track the [active project
|
||||||
3. Retired
|
requirements](#active-project-requirements)
|
||||||
|
|
||||||
Incubating projects are experimental or prototypal. They're being actively developed and aren't
|
Retired projects are no longer in use or recommended. They are kept for
|
||||||
ready for production deployment. These projects may appear and disappear without warning and are not
|
reference. Retired projects must meet the [retirement
|
||||||
stable. There is no minimum requirement for a project to be in incubation.
|
requirements](#retirement-requirements)
|
||||||
|
|
||||||
Graduated projects are in-use, stable, and production ready. They met the [graduation
|
You'll notice that most of the active projects have scripts or examples that
|
||||||
requirements](#graduation-requirements) and are actively maintained.
|
use the `active` path as part of their install process. When moved outside the
|
||||||
|
`active` directory their scripts and examples break. This is intentional. If
|
||||||
|
you want a retired project to work again, bring it back to the active
|
||||||
|
directory.
|
||||||
|
|
||||||
Retired projects are no longer in use or recommended. They are kept for reference. Retired projects
|
## Project Types
|
||||||
must meet the [retirement requirements](#retirement-requirements)
|
|
||||||
|
|
||||||
## Supported Projects
|
All projects will be prefixed with one of the following categories:
|
||||||
|
|
||||||
All projects will fall into one of the following categories:
|
- `device_`
|
||||||
|
- `os_`
|
||||||
|
- `software_`
|
||||||
|
- `podman_`
|
||||||
|
- `docker_`
|
||||||
|
- `kubernetes_`
|
||||||
|
|
||||||
- hardware
|
Note, some projects will be named with just the prefix. These are projects for
|
||||||
- infrastructure
|
configuring the underlying technology. The `podman` project, for example, will
|
||||||
- cloud
|
tell you how to configure and install podman so it works correctly.
|
||||||
- systemd
|
|
||||||
- podman
|
|
||||||
- docker
|
|
||||||
- kubernetes
|
|
||||||
|
|
||||||
Hardware will contain projects that relate to specific machines or equipment. 3D printers, Raspberry
|
`device_` will prefix projects that relate to specific machines or equipment.
|
||||||
Pis, and other IOT devices qualify as specialized hardware that needs documentation and
|
3D printers, Raspberry Pis, and other IOT devices qualify as specialized
|
||||||
configuration. This is not limited to computer equipment. The furnace is an important part of the
|
hardware that needs documentation and configuration. This is not limited to
|
||||||
home lab. the Air Conditioner is integral to the homelab's function. These projects will also be documented.
|
computer equipment. The furnace is an important part of the homelab. the Air
|
||||||
|
Conditioner is integral to the homelab's function. These projects will also be
|
||||||
|
documented.
|
||||||
|
|
||||||
Infrastructure will contain projects that set up the environments for the remaining listed project
|
`os_` will contain projects that set up operating systems. These include best
|
||||||
types. For example, infrastructure will contain "how to set up a linux box with docker" or "how to
|
practices, backups, updates, default software, etc.
|
||||||
set up a k3s cluster for kubernetes".
|
|
||||||
|
|
||||||
Cloud projects are for specific cloud providers.
|
`cloud_` projects are for specific cloud providers. This will contain
|
||||||
|
documentation and errata for things like AWS IAM, Route53, etc. Note these will
|
||||||
|
be prefixed with the cloud's name, not the word "cloud". So AWS services will
|
||||||
|
be prefixed with `aws_` and azure would be `azure_`. This should make them more
|
||||||
|
searchable.
|
||||||
|
|
||||||
Systemd projects are designed to be installed with ansible and run via systemd on a linux VM or
|
`software_` projects record configuration for common software agnostic to
|
||||||
other linux hardware.
|
operating system or linux flavor.
|
||||||
|
|
||||||
Podman projects are either designed to be run as quadlets or as podman containers outright.
|
`podman_` projects are either designed to be run as quadlets or as podman
|
||||||
|
containers outright.
|
||||||
|
|
||||||
Docker projects are either docker-compose or some form of docker run command.
|
`kubernetes_` projects are helm, kustomize, kubectl, or some other kubernetes
|
||||||
|
compliant deployment.
|
||||||
|
|
||||||
Kubernetes projects are helm, kustomize, kubectl, or some other kubernetes compliant deployment.
|
## Active Project Requirements
|
||||||
|
|
||||||
## Graduation Requirements
|
|
||||||
|
|
||||||
- [ ] Installation is documented
|
- [ ] Installation is documented
|
||||||
- [ ] Installation configuration examples are provided
|
- [ ] Installation configuration examples are provided
|
||||||
@@ -91,11 +291,30 @@ Kubernetes projects are helm, kustomize, kubectl, or some other kubernetes compl
|
|||||||
- [ ] If applicable, a replacement has been identified and documented
|
- [ ] If applicable, a replacement has been identified and documented
|
||||||
- [ ] If applicable, backup data locations are documented
|
- [ ] If applicable, backup data locations are documented
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
All projects will have, at minimum.
|
||||||
|
|
||||||
|
1. A README named `project-name.md`
|
||||||
|
2. A directory called `secrets` which will be gitignored.
|
||||||
|
|
||||||
|
## Creating a Project
|
||||||
|
|
||||||
|
Assuming your project name is `my-project` and it runs on `podman`
|
||||||
|
|
||||||
|
1. Create a new directory called `podman_my-project` under the `active`
|
||||||
|
directory
|
||||||
|
2. Copy the readme template: `cp project_readme_template.md
|
||||||
|
active/container_my-project/my-project.md`
|
||||||
|
3. Populate `my-project.md` as you work through the install process
|
||||||
|
4. Create a directory called `secrets` in `podman_my-project`. This will be
|
||||||
|
automatically gitignored. Put all secrets here.
|
||||||
|
5. Push the changes when you have a working product
|
||||||
|
|
||||||
## Order of Operations
|
## Order of Operations
|
||||||
|
|
||||||
1. Install cloud projects. These usually have no dependencies and typically provide critical services
|
1. Configure cloud providers. These usually have no dependencies and typically
|
||||||
to other projects (DNS, email notifications, etc.)
|
provide critical services to other projects (DNS, email notifications, etc.)
|
||||||
2. Install infrastructure projects. Usually these only have dependencies on cloud services.
|
2. Install infrastructure projects. Usually these only have dependencies on
|
||||||
3. Install systemd services. These are usually low-level programs that require a dedicated machine
|
cloud services.
|
||||||
and perform semi-critical functions (ipv4 proxy, ddns, etc.).
|
3. Install systemd, kubernetes, docker, podman, and other services.
|
||||||
4. Install kubernetes, docker, podman, and other services.
|
|
||||||
|
|||||||
12
active/aws_cli/aws_cli.md
Normal file
12
active/aws_cli/aws_cli.md
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
# AWS CLI
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run as root
|
||||||
|
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
|
||||||
|
unzip awscliv2.zip && \
|
||||||
|
./aws/install && \
|
||||||
|
rm -f ./awscliv2.zip && \
|
||||||
|
rm -rf ./aws
|
||||||
|
```
|
||||||
@@ -1,24 +1,33 @@
|
|||||||
# AWS Credentials
|
# AWS Credentials
|
||||||
|
|
||||||
## Credential Generation
|
Note: this requires the AWS CLI. See [AWS CLI](/active/aws_cli/aws_cli.md)
|
||||||
|
|
||||||
|
- [AWS Credentials](#aws-credentials)
|
||||||
|
- [Route53 Credential Generation](#route53-credential-generation)
|
||||||
|
- [AWS Certbot Route53 Policies](#aws-certbot-route53-policies)
|
||||||
|
- [Email Credentials](#email-credentials)
|
||||||
|
|
||||||
|
## Route53 Credential Generation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export AWS_USERNAME=
|
export AWS_USERNAME=
|
||||||
aws iam create-user --user-name $AWS_USERNAME
|
aws iam create-user --user-name $AWS_USERNAME
|
||||||
aws iam create-access-key --user-name $AWS_USERNAME
|
|
||||||
|
|
||||||
# Allow updating reeseapps
|
# Allow updating reeseapps
|
||||||
aws iam attach-user-policy --user-name $AWS_USERNAME --policy-arn $(cat cloud/graduated/aws_iam/secrets/update-reeseapps-iam-policy-arn)
|
aws iam attach-user-policy --user-name $AWS_USERNAME --policy-arn $(cat active/aws_iam/secrets/update-reeseapps-iam-policy-arn)
|
||||||
|
|
||||||
# Allow updating reeselink
|
# Allow updating reeselink
|
||||||
aws iam attach-user-policy --user-name $AWS_USERNAME --policy-arn $(cat cloud/graduated/aws_iam/secrets/update-reeselink-iam-policy-arn)
|
aws iam attach-user-policy --user-name $AWS_USERNAME --policy-arn $(cat active/aws_iam/secrets/update-reeselink-iam-policy-arn)
|
||||||
|
|
||||||
|
# Create credentials (run aws configure on the machine that needs these to input them manually)
|
||||||
|
aws iam create-access-key --user-name $AWS_USERNAME
|
||||||
```
|
```
|
||||||
|
|
||||||
## AWS Certbot Route53 Policies
|
## AWS Certbot Route53 Policies
|
||||||
|
|
||||||
Example Policy:
|
Example Policy:
|
||||||
|
|
||||||
cloud/graduated/aws_iam/secrets/policies/route53_reeselink.json
|
active/aws_iam/secrets/route53_reeselink.json
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
@@ -50,8 +59,21 @@ cloud/graduated/aws_iam/secrets/policies/route53_reeselink.json
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Allow updating route53 records for reeselink.com
|
# Allow updating route53 records for reeselink.com
|
||||||
aws iam create-policy --policy-name update-reeselink --policy-document file://cloud/graduated/aws_iam/secrets/route53_reeselink_policy.json
|
aws iam create-policy --policy-name update-reeselink --policy-document file://active/aws_iam/secrets/route53_reeselink_policy.json
|
||||||
|
|
||||||
# Allow updating route53 records for reeseapps.com
|
# Allow updating route53 records for reeseapps.com
|
||||||
aws iam create-policy --policy-name update-reeseapps --policy-document file://cloud/graduated/aws_iam/secrets/route53_reeseapps_policy.json
|
aws iam create-policy --policy-name update-reeseapps --policy-document file://active/aws_iam/secrets/route53_reeseapps_policy.json
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Email Credentials
|
||||||
|
|
||||||
|
<https://docs.aws.amazon.com/ses/latest/dg/smtp-credentials.html>
|
||||||
|
|
||||||
|
You can technically do this through the CLI, see above link.
|
||||||
|
|
||||||
|
1. Log into the AWS console
|
||||||
|
2. Navigate to SES
|
||||||
|
3. Click "SMTP Settings"
|
||||||
|
4. Click "Create SMTP Credentials"
|
||||||
|
5. Name it "ses-smtp-user.something"
|
||||||
|
6. Copy the username and password
|
||||||
@@ -17,14 +17,14 @@ convenience.
|
|||||||
|
|
||||||
## Reeselink Addresses
|
## Reeselink Addresses
|
||||||
|
|
||||||
See `example-record-file.json` for example contents of `file://cloud/graduated/aws_route53/secrets/aws/reeselink.json`.
|
See `example-record-file.json` for example contents of `file://active/aws_route53/secrets/aws/reeselink.json`.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
aws route53 change-resource-record-sets --hosted-zone-id $(cat cloud/graduated/aws_route53/secrets/reeselink-zoneid) --change-batch file://cloud/graduated/aws_route53/secrets/reeselink.json
|
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeselink-zoneid) --change-batch file://active/aws_route53/secrets/reeselink.json
|
||||||
```
|
```
|
||||||
|
|
||||||
## Reeseapps Addresses
|
## Reeseapps Addresses
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
aws route53 change-resource-record-sets --hosted-zone-id $(cat cloud/graduated/aws_route53/secrets/reeseapps-zoneid) --change-batch file://cloud/graduated/aws_route53/secrets/reeseapps.json
|
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/aws_route53/secrets/reeseapps.json
|
||||||
```
|
```
|
||||||
3
active/aws_ses/aws_ses.md
Normal file
3
active/aws_ses/aws_ses.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# AWS SES
|
||||||
|
|
||||||
|
AWS Simple Email Service
|
||||||
208
active/container_bifrost/bifrost.md
Normal file
208
active/container_bifrost/bifrost.md
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
# Podman bifrost
|
||||||
|
|
||||||
|
- [Podman bifrost](#podman-bifrost)
|
||||||
|
- [Setup bifrost Project](#setup-bifrost-project)
|
||||||
|
- [Install bifrost](#install-bifrost)
|
||||||
|
- [Create the ai user](#create-the-ai-user)
|
||||||
|
- [Write the bifrost compose spec](#write-the-bifrost-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert bifrost compose spec to quadlets](#convert-bifrost-compose-spec-to-quadlets)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose bifrost](#expose-bifrost)
|
||||||
|
- [Using bifrost](#using-bifrost)
|
||||||
|
- [Adding Models](#adding-models)
|
||||||
|
- [Testing Models](#testing-models)
|
||||||
|
- [Backup bifrost](#backup-bifrost)
|
||||||
|
- [Upgrade bifrost](#upgrade-bifrost)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup bifrost Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_bifrost
|
||||||
|
- [ ] Find and replace bifrost with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install bifrost
|
||||||
|
|
||||||
|
### Create the ai user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd ai
|
||||||
|
loginctl enable-linger $(id -u ai)
|
||||||
|
systemctl --user --machine=ai@.host enable podman-restart
|
||||||
|
systemctl --user --machine=ai@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/ai/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the bifrost compose spec
|
||||||
|
|
||||||
|
Edit the compose.yaml at active/container_bifrost/compose/compose.yaml
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/bifrost/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert bifrost compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_bifrost/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_bifrost/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=ai-ai
|
||||||
|
scp -r active/container_bifrost/quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart bifrost
|
||||||
|
journalctl --user -u bifrost -f
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose bifrost
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
## Using bifrost
|
||||||
|
|
||||||
|
### Adding Models
|
||||||
|
|
||||||
|
```json
|
||||||
|
// qwen3.5-35b-a3b-thinking
|
||||||
|
{
|
||||||
|
"temperature": 1,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-coding
|
||||||
|
{
|
||||||
|
"temperature": 0.6,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 0,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-instruct
|
||||||
|
{
|
||||||
|
"temperature": 0.7,
|
||||||
|
"top_p": 0.8,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing Models
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List models
|
||||||
|
curl -L -X GET 'https://aipi.reeseapps.com/v1/models' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234'
|
||||||
|
|
||||||
|
curl -L -X POST 'https://aipi.reeseapps.com/v1/chat/completions' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234' \
|
||||||
|
-d '{
|
||||||
|
"model": "gpt-4o-mini", # 👈 REPLACE with 'public model name' for any db-model
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"content": "Hey, how's it going",
|
||||||
|
"role": "user"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup bifrost
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade bifrost
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-bifrost-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER$:/home/bifrost/.config/containers/systemd/
|
||||||
|
ssh bifrost systemctl --user daemon-reload
|
||||||
|
ssh bifrost systemctl --user restart bifrost
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user bifrost is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel bifrost
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
3
active/container_bifrost/compose/README.md
Normal file
3
active/container_bifrost/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
32
active/container_bifrost/compose/compose.yaml
Normal file
32
active/container_bifrost/compose/compose.yaml
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
services:
|
||||||
|
bifrost:
|
||||||
|
image: docker.io/maximhq/bifrost:latest
|
||||||
|
container_name: bifrost
|
||||||
|
ports:
|
||||||
|
- "8000:8000"
|
||||||
|
volumes:
|
||||||
|
- bifrost-data:/app/data
|
||||||
|
environment:
|
||||||
|
- APP_PORT=8000
|
||||||
|
- APP_HOST=0.0.0.0
|
||||||
|
- LOG_LEVEL=info
|
||||||
|
- LOG_STYLE=json
|
||||||
|
ulimits:
|
||||||
|
nofile:
|
||||||
|
soft: 65536
|
||||||
|
hard: 65536
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD",
|
||||||
|
"wget",
|
||||||
|
"--no-verbose",
|
||||||
|
"--tries=1",
|
||||||
|
"-O",
|
||||||
|
"/dev/null",
|
||||||
|
"http://localhost:8080/health",
|
||||||
|
]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 3
|
||||||
|
restart: unless-stopped
|
||||||
17
active/container_bifrost/quadlets/bifrost.container
Normal file
17
active/container_bifrost/quadlets/bifrost.container
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=bifrost
|
||||||
|
Environment=APP_PORT=8000 APP_HOST=0.0.0.0 LOG_LEVEL=info LOG_STYLE=json
|
||||||
|
HealthCmd=["wget", "--no-verbose", "--tries=1", "-O", "/dev/null", "http://localhost:8080/health"]
|
||||||
|
HealthInterval=30s
|
||||||
|
HealthRetries=3
|
||||||
|
HealthTimeout=10s
|
||||||
|
Image=docker.io/maximhq/bifrost:latest
|
||||||
|
PublishPort=8000:8000
|
||||||
|
Ulimit=nofile=65536:65536
|
||||||
|
Volume=bifrost-data:/app/data
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
89
active/container_bricktracker/bricktracker.md
Normal file
89
active/container_bricktracker/bricktracker.md
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
# Brick Tracker
|
||||||
|
|
||||||
|
<https://gitea.baerentsen.space/FrederikBaerentsen/BrickTracker/src/branch/master/docs/quickstart.md>
|
||||||
|
|
||||||
|
## Update
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_bricktracker/connorbricktracker-compose.yaml bricktracker:
|
||||||
|
ssh bricktracker
|
||||||
|
docker compose -f connorbricktracker-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
### Create the bricktracker user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd bricktracker
|
||||||
|
loginctl enable-linger $(id -u bricktracker)
|
||||||
|
systemctl --user --machine=bricktracker@.host enable podman-restart
|
||||||
|
systemctl --user --machine=bricktracker@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/bricktracker/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configure App
|
||||||
|
|
||||||
|
1. Copy the `.env.sample` from <https://gitea.baerentsen.space/FrederikBaerentsen/BrickTracker/src/branch/master/.env.sample> to `.env`
|
||||||
|
2. Set the following:
|
||||||
|
1. `BK_AUTHENTICATION_PASSWORD`
|
||||||
|
2. `BK_AUTHENTICATION_KEY`
|
||||||
|
3. `BK_DATABASE_PATH`
|
||||||
|
4. `BK_INSTRUCTIONS_FOLDER`
|
||||||
|
5. `BK_MINIFIGURES_FOLDER`
|
||||||
|
6. `BK_PARTS_FOLDER`
|
||||||
|
7. `BK_REBRICKABLE_API_KEY`
|
||||||
|
8. `BK_SETS_FOLDER`
|
||||||
|
3. Create the docker compose yaml
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
bricktracker:
|
||||||
|
container_name: BrickTracker
|
||||||
|
restart: unless-stopped
|
||||||
|
image: gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.3.1
|
||||||
|
ports:
|
||||||
|
- "3333:3333"
|
||||||
|
volumes:
|
||||||
|
- ./data:/var/lib/bricktracker
|
||||||
|
- ./static/instructions:/app/static/instructions
|
||||||
|
- ./static/minifigures:/app/static/minifigures
|
||||||
|
- ./static/parts:/app/static/parts
|
||||||
|
- ./static/sets:/app/static/sets
|
||||||
|
env_file: ".env"
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Start the service: `docker compose up -d`
|
||||||
|
|
||||||
|
## Caddy
|
||||||
|
|
||||||
|
1. Create the new DNS record for your website
|
||||||
|
2. Create the Caddyfile at `./Caddyfile`
|
||||||
|
|
||||||
|
```conf
|
||||||
|
https://connors-legos.reeseapps.com:443 {
|
||||||
|
reverse_proxy 127.0.0.1:3333
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Create the Caddy compose.yaml
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
caddy:
|
||||||
|
image: caddy:<version>
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "80:80"
|
||||||
|
- "443:443"
|
||||||
|
- "443:443/udp"
|
||||||
|
volumes:
|
||||||
|
- ./Caddyfile:/etc/caddy/Caddyfile
|
||||||
|
- caddy_data:/data
|
||||||
|
- caddy_config:/config
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
caddy_data:
|
||||||
|
caddy_config:
|
||||||
|
```
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
services:
|
||||||
|
bricktracker:
|
||||||
|
container_name: BrickTracker
|
||||||
|
restart: unless-stopped
|
||||||
|
image: gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.3.1
|
||||||
|
ports:
|
||||||
|
- "3333:3333"
|
||||||
|
volumes:
|
||||||
|
- /srv/bricktracker/connorbricktracker/data:/var/lib/bricktracker
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/instructions:/app/static/instructions
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/minifigures:/app/static/minifigures
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/parts:/app/static/parts
|
||||||
|
- /srv/bricktracker/connorbricktracker/static/sets:/app/static/sets
|
||||||
|
env_file: "/srv/bricktracker/connorbricktracker/.env"
|
||||||
|
security_opt:
|
||||||
|
- label=disable
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=BrickTracker
|
||||||
|
EnvironmentFile=/home/connorbricktracker/.env
|
||||||
|
Image=gitea.baerentsen.space/frederikbaerentsen/bricktracker:1.2.2
|
||||||
|
PublishPort=3333:3333
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/home/connorbricktracker/data:/var/lib/bricktracker
|
||||||
|
Volume=/home/connorbricktracker/static/instructions:/app/static/instructions
|
||||||
|
Volume=/home/connorbricktracker/static/minifigures:/app/static/minifigures
|
||||||
|
Volume=/home/connorbricktracker/static/parts:/app/static/parts
|
||||||
|
Volume=/home/connorbricktracker/static/sets:/app/static/sets
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
10
active/container_caddy/Containerfile
Normal file
10
active/container_caddy/Containerfile
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
FROM docker.io/caddy:2-builder AS builder
|
||||||
|
|
||||||
|
RUN xcaddy build \
|
||||||
|
--with github.com/caddy-dns/route53@v1.6.0 \
|
||||||
|
--with github.com/fabriziosalmi/caddy-waf
|
||||||
|
|
||||||
|
|
||||||
|
FROM docker.io/caddy:2
|
||||||
|
|
||||||
|
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
|
||||||
18
active/container_caddy/caddy.container
Normal file
18
active/container_caddy/caddy.container
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Caddy
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
AddCapability=NET_ADMIN
|
||||||
|
ContainerName=caddy
|
||||||
|
Image=gitea.reeseapps.com/services/caddy:latest
|
||||||
|
Network=host
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/etc/caddy:/etc/caddy
|
||||||
|
Volume=caddy_data:/data
|
||||||
|
Volume=caddy_config:/config
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
210
active/container_caddy/caddy.md
Normal file
210
active/container_caddy/caddy.md
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
# Caddy Reverse Proxy
|
||||||
|
|
||||||
|
- [Caddy Reverse Proxy](#caddy-reverse-proxy)
|
||||||
|
- [Custom Caddy Image](#custom-caddy-image)
|
||||||
|
- [Install Caddy](#install-caddy)
|
||||||
|
- [Ansible](#ansible)
|
||||||
|
- [Manual](#manual)
|
||||||
|
- [Adding a new Caddy Record](#adding-a-new-caddy-record)
|
||||||
|
- [Logs](#logs)
|
||||||
|
- [Caddy WAF](#caddy-waf)
|
||||||
|
|
||||||
|
## Custom Caddy Image
|
||||||
|
|
||||||
|
This repo builds a custom caddy image with route53 DNS certbot support.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman image pull gitea.reeseapps.com/services/caddy:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
To upgrade the image, check [the caddy-dns route53
|
||||||
|
project](https://github.com/caddy-dns/route53/tags) releases and update the
|
||||||
|
`Containerfile` with the new version.
|
||||||
|
|
||||||
|
## Install Caddy
|
||||||
|
|
||||||
|
### Ansible
|
||||||
|
|
||||||
|
You'll need a secrets/Caddyfile with your caddy config.
|
||||||
|
|
||||||
|
`secrets/Caddyfile` example:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
https://something.reeseapps.com:443 {
|
||||||
|
reverse_proxy internal.reeselink.com:8000
|
||||||
|
}
|
||||||
|
|
||||||
|
https://something-else.reeseapps.com:443 {
|
||||||
|
reverse_proxy internal-other.reeselink.com:8080
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure to add [your route53 configuration](https://github.com/caddy-dns/route53?tab=readme-ov-file#configuration)
|
||||||
|
|
||||||
|
```conf
|
||||||
|
tls {
|
||||||
|
dns route53 {
|
||||||
|
access_key_id "..."
|
||||||
|
secret_access_key "..."
|
||||||
|
region "us-east-1"
|
||||||
|
wait_for_route53_sync true
|
||||||
|
skip_route53_sync_on_delete true
|
||||||
|
route53_max_wait 2m
|
||||||
|
max_retries 5
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The playbook limits the installer to `hosts: caddy` so make sure you have a caddy
|
||||||
|
host in your inventory.
|
||||||
|
|
||||||
|
Now you can install the Caddy service with something like:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Base Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_proxy.yaml
|
||||||
|
|
||||||
|
# Deskwork (AI) Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_deskwork.yaml
|
||||||
|
|
||||||
|
# Toybox (AI) Proxy
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
active/container_caddy/install_caddy_toybox.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
See ansible playbook [install_caddy.yaml](/active/container_caddy/install_caddy.yaml)
|
||||||
|
|
||||||
|
### Manual
|
||||||
|
|
||||||
|
As root
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir /etc/caddy
|
||||||
|
vim /etc/caddy/Caddyfile
|
||||||
|
```
|
||||||
|
|
||||||
|
Caddy will automatically provision certificates if the server DNS points to the correct IP
|
||||||
|
and is accessible on the ports specifified. All you need to do is put `https` in the caddy conf.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```conf
|
||||||
|
# Gitea
|
||||||
|
https://gitea.reeseapps.com:443 {
|
||||||
|
reverse_proxy podman.reeselink.com:3000
|
||||||
|
}
|
||||||
|
|
||||||
|
# Jellyfin
|
||||||
|
https://jellyfin.reeseapps.com:443 {
|
||||||
|
reverse_proxy podman.reeselink.com:8096
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
vim /etc/containers/systemd/caddy.container
|
||||||
|
```
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[Unit]
|
||||||
|
Description=Caddy
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
AddCapability=NET_ADMIN
|
||||||
|
ContainerName=caddy
|
||||||
|
Image=docker.io/caddy:2
|
||||||
|
Network=host
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/etc/caddy:/etc/caddy
|
||||||
|
Volume=caddy_data:/data
|
||||||
|
Volume=caddy_config:/config
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart caddy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Adding a new Caddy Record
|
||||||
|
|
||||||
|
Before you can create a Caddyfile you need records that point to your server.
|
||||||
|
|
||||||
|
You can either create them manually in your DNS provider of choice or use the provided
|
||||||
|
ddns service:
|
||||||
|
|
||||||
|
1. Update the [ddns caddy records](/active/container_ddns/secrets/caddy_records.yaml)
|
||||||
|
2. (Optional) Update the Caddyfile at `active/container_caddy/secrets/Caddyfile`
|
||||||
|
3. Run the [caddy ansible playbook](/active/container_caddy/caddy.md#install-caddy)
|
||||||
|
|
||||||
|
## Logs
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Follow remote connections
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host}'
|
||||||
|
|
||||||
|
# Filter out noisy hosts
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '.request | {remote_ip,host} | select(.host != "gitea.reeseapps.com")'
|
||||||
|
|
||||||
|
# Focus on user agents
|
||||||
|
podman logs -f caddy | grep -e '^{' | jq -c '
|
||||||
|
{
|
||||||
|
"User-Agent": .request.headers["User-Agent"],
|
||||||
|
remote_ip: .request.remote_ip,
|
||||||
|
host: .request.host,
|
||||||
|
status: .status
|
||||||
|
}
|
||||||
|
'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Caddy WAF
|
||||||
|
|
||||||
|
<https://github.com/fabriziosalmi/caddy-waf>
|
||||||
|
|
||||||
|
1. Copy the rules.json to `/etc/caddy/rules.json`
|
||||||
|
2. Update the Caddyfile to something like this:
|
||||||
|
|
||||||
|
```Caddyfile
|
||||||
|
gitea.reeseapps.com:443 {
|
||||||
|
log {
|
||||||
|
output stdout
|
||||||
|
format json {
|
||||||
|
message_key msg # Key for the log message
|
||||||
|
level_key severity # Key for the log level
|
||||||
|
time_key timestamp # Key for the timestamp
|
||||||
|
name_key logger # Key for the logger name
|
||||||
|
caller_key function # Key for the caller information
|
||||||
|
stacktrace_key stack # Key for error stacktraces
|
||||||
|
time_format "2006-01-02 15:04:05 MST" # RFC3339-like format
|
||||||
|
time_local # Use local timezone
|
||||||
|
duration_format "ms" # Show durations in milliseconds
|
||||||
|
level_format "upper" # Uppercase log levels
|
||||||
|
}
|
||||||
|
}
|
||||||
|
route {
|
||||||
|
waf {
|
||||||
|
metrics_endpoint /waf_metrics
|
||||||
|
rule_file rules.json
|
||||||
|
}
|
||||||
|
|
||||||
|
@wafmetrics {
|
||||||
|
path /waf_metrics
|
||||||
|
}
|
||||||
|
|
||||||
|
handle @wafmetrics { } # empty → let the WAF serve the metrics
|
||||||
|
|
||||||
|
handle {
|
||||||
|
reverse_proxy gitea.reeselink.com:3000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
28
active/container_caddy/install_caddy_deskwork.yaml
Normal file
28
active/container_caddy/install_caddy_deskwork.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
- name: Create Caddy Proxy
|
||||||
|
hosts: deskwork-root
|
||||||
|
tasks:
|
||||||
|
- name: Create /etc/caddy dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/caddy
|
||||||
|
state: directory
|
||||||
|
mode: '0755'
|
||||||
|
- name: Copy Caddyfile
|
||||||
|
template:
|
||||||
|
src: secrets/deskwork.Caddyfile
|
||||||
|
dest: /etc/caddy/Caddyfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Template Caddy Container Services
|
||||||
|
template:
|
||||||
|
src: caddy.container
|
||||||
|
dest: /etc/containers/systemd/caddy.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Reload and start the Caddy service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: caddy.service
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
45
active/container_caddy/install_caddy_proxy.yaml
Normal file
45
active/container_caddy/install_caddy_proxy.yaml
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
- name: Create Caddy Proxy
|
||||||
|
hosts: caddy
|
||||||
|
tasks:
|
||||||
|
- name: Copy Containerfile for build
|
||||||
|
template:
|
||||||
|
src: Containerfile
|
||||||
|
dest: /etc/caddy/Containerfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Build Caddy Image
|
||||||
|
shell:
|
||||||
|
cmd: podman build -t gitea.reeseapps.com/services/caddy:latest -f /etc/caddy/Containerfile
|
||||||
|
- name: Create /etc/caddy dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/caddy
|
||||||
|
state: directory
|
||||||
|
mode: "0755"
|
||||||
|
- name: Copy Caddyfile
|
||||||
|
template:
|
||||||
|
src: secrets/proxy.Caddyfile
|
||||||
|
dest: /etc/caddy/Caddyfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Copy rules.json
|
||||||
|
template:
|
||||||
|
src: rules.json
|
||||||
|
dest: /etc/caddy/rules.json
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Template Caddy Container Services
|
||||||
|
template:
|
||||||
|
src: caddy.container
|
||||||
|
dest: /etc/containers/systemd/caddy.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Reload and start the Caddy service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: caddy.service
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
28
active/container_caddy/install_caddy_toybox.yaml
Normal file
28
active/container_caddy/install_caddy_toybox.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
- name: Create Caddy Proxy
|
||||||
|
hosts: toybox-root
|
||||||
|
tasks:
|
||||||
|
- name: Create /etc/caddy dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /etc/caddy
|
||||||
|
state: directory
|
||||||
|
mode: "0755"
|
||||||
|
- name: Copy Caddyfile
|
||||||
|
template:
|
||||||
|
src: secrets/toybox.Caddyfile
|
||||||
|
dest: /etc/caddy/Caddyfile
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Template Caddy Container Services
|
||||||
|
template:
|
||||||
|
src: caddy.container
|
||||||
|
dest: /etc/containers/systemd/caddy.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: "0644"
|
||||||
|
- name: Reload and start the Caddy service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: caddy.service
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
26
active/container_caddy/rules.json
Normal file
26
active/container_caddy/rules.json
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"id": "block-scanners",
|
||||||
|
"phase": 1,
|
||||||
|
"pattern": "(?i)(nikto|sqlmap|nmap|acunetix|nessus|openvas|wpscan|dirbuster|burpsuite|owasp zap|netsparker|appscan|arachni|skipfish|gobuster|wfuzz|hydra|metasploit|nessus|openvas|qualys|zap|w3af|openwebspider|netsparker|appspider|rapid7|nessus|qualys|nuclei|zgrab|vega|gospider|gxspider|whatweb|xspider|joomscan|uniscan|blindelephant)",
|
||||||
|
"targets": [
|
||||||
|
"HEADERS:User-Agent"
|
||||||
|
],
|
||||||
|
"severity": "CRITICAL",
|
||||||
|
"action": "block",
|
||||||
|
"score": 10,
|
||||||
|
"description": "Block traffic from known vulnerability scanners and penetration testing tools. Includes more scanners."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "block-crawlers",
|
||||||
|
"phase": 1,
|
||||||
|
"pattern": "(meta-externalagent)",
|
||||||
|
"targets": [
|
||||||
|
"HEADERS:User-Agent"
|
||||||
|
],
|
||||||
|
"severity": "CRITICAL",
|
||||||
|
"action": "block",
|
||||||
|
"score": 10,
|
||||||
|
"description": "Block traffic from web scrapers and crawlers."
|
||||||
|
}
|
||||||
|
]
|
||||||
173
active/container_certbot/certbot.md
Normal file
173
active/container_certbot/certbot.md
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
# Podman certbot
|
||||||
|
|
||||||
|
- [Podman certbot](#podman-certbot)
|
||||||
|
- [Setup certbot Project](#setup-certbot-project)
|
||||||
|
- [Install certbot](#install-certbot)
|
||||||
|
- [Create the certbot user](#create-the-certbot-user)
|
||||||
|
- [Write the certbot compose spec](#write-the-certbot-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert certbot compose spec to quadlets](#convert-certbot-compose-spec-to-quadlets)
|
||||||
|
- [Create any container-mounted directories](#create-any-container-mounted-directories)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose certbot](#expose-certbot)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup certbot](#backup-certbot)
|
||||||
|
- [Upgrade certbot](#upgrade-certbot)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup certbot Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_certbot
|
||||||
|
- [ ] Find and replace certbot with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install certbot
|
||||||
|
|
||||||
|
### Create the certbot user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd certbot
|
||||||
|
loginctl enable-linger $(id -u certbot)
|
||||||
|
systemctl --user --machine=certbot@.host enable podman-restart
|
||||||
|
systemctl --user --machine=certbot@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/certbot/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the certbot compose spec
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run -it --rm --name certbot \
|
||||||
|
-v "/etc/letsencrypt:/etc/letsencrypt:Z" \
|
||||||
|
-v "/var/lib/letsencrypt:/var/lib/letsencrypt:Z" \
|
||||||
|
certbot/certbot certonly -d keycloak.reeseapps.com -d keycloak.reeselink.com
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/certbot/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert certbot compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_certbot/:/compose \
|
||||||
|
-v $(pwd)/active/container_certbot/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r active/container_certbot/quadlets/. $PODMAN_SERVER:/home/certbot/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R certbot:certbot /home/certbot/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create any container-mounted directories
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell certbot@
|
||||||
|
podman unshare
|
||||||
|
mkdir some_volume
|
||||||
|
# Chown to the namespaced user with UID 1000
|
||||||
|
# This will be some really obscure UID outside the namespace
|
||||||
|
# This will also solve most permission denied errors
|
||||||
|
chown -R 1000:1000 some_volume
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell certbot@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart certbot
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose certbot
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup certbot
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade certbot
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-certbot-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER$:/home/certbot/.config/containers/systemd/
|
||||||
|
ssh certbot systemctl --user daemon-reload
|
||||||
|
ssh certbot systemctl --user restart certbot
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user certbot is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel certbot
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
3
active/container_certbot/quadlets/README.md
Normal file
3
active/container_certbot/quadlets/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Quadlets
|
||||||
|
|
||||||
|
Put your quadlets here.
|
||||||
10
active/container_certbot/quadlets/certbot.service
Normal file
10
active/container_certbot/quadlets/certbot.service
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Runs certbot renew
|
||||||
|
After=syslog.target network.target auditd.service
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
ExecStart=/usr/bin/command -with -arguments
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
11
active/container_certbot/quadlets/certbot.timer
Normal file
11
active/container_certbot/quadlets/certbot.timer
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Daily certbot certificate renewal
|
||||||
|
|
||||||
|
[Timer]
|
||||||
|
OnCalendar=daily
|
||||||
|
AccuracySec=12h
|
||||||
|
Persistent=true
|
||||||
|
Unit=certbot.service
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=timers.target
|
||||||
1
active/container_ddns/.python-version
Normal file
1
active/container_ddns/.python-version
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3.13
|
||||||
7
active/container_ddns/.vscode/settings.json
vendored
Normal file
7
active/container_ddns/.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"python.testing.pytestArgs": [
|
||||||
|
"."
|
||||||
|
],
|
||||||
|
"python.testing.unittestEnabled": false,
|
||||||
|
"python.testing.pytestEnabled": true
|
||||||
|
}
|
||||||
26
active/container_ddns/Containerfile
Normal file
26
active/container_ddns/Containerfile
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
FROM python:3.12-slim-bookworm
|
||||||
|
|
||||||
|
# The installer requires curl (and certificates) to download the release archive
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates
|
||||||
|
|
||||||
|
# Download the latest installer
|
||||||
|
ADD https://astral.sh/uv/install.sh /uv-installer.sh
|
||||||
|
|
||||||
|
# Run the installer then remove it
|
||||||
|
RUN sh /uv-installer.sh && rm /uv-installer.sh
|
||||||
|
|
||||||
|
# Ensure the installed binary is on the `PATH`
|
||||||
|
ENV PATH="/root/.local/bin/:$PATH"
|
||||||
|
|
||||||
|
# Copy the project into the image
|
||||||
|
COPY update.py uv.lock pyproject.toml /app/
|
||||||
|
|
||||||
|
# Copy the records file
|
||||||
|
COPY records.yaml /etc/ddns/records.yaml
|
||||||
|
|
||||||
|
# Sync the project into a new environment, using the frozen lockfile
|
||||||
|
WORKDIR /app
|
||||||
|
RUN uv sync --frozen
|
||||||
|
|
||||||
|
# Presuming there is a `my_app` command provided by the project
|
||||||
|
CMD ["uv", "run", "update.py"]
|
||||||
20
active/container_ddns/ddns.container
Normal file
20
active/container_ddns/ddns.container
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=DDNS
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
Environment=ROUTE53_RECORDS_FILE=/etc/ddns/records.yaml
|
||||||
|
Environment=AWS_ACCESS_KEY_ID={{ aws.access_key_id }}
|
||||||
|
Environment=AWS_SECRET_ACCESS_KEY={{ aws.secret_access_key }}
|
||||||
|
{% if item.skip_ipv6 | default(false) %}
|
||||||
|
Environment=GLOBAL_SKIP_IPV6=true
|
||||||
|
{% endif %}
|
||||||
|
{% if item.skip_ipv4 | default(false) %}
|
||||||
|
Environment=GLOBAL_SKIP_IPV4=true
|
||||||
|
{% endif %}
|
||||||
|
Image=gitea.reeseapps.com/services/ddns:latest
|
||||||
|
Network=ddns.network
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
109
active/container_ddns/ddns.md
Normal file
109
active/container_ddns/ddns.md
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# DDNS for Route53
|
||||||
|
|
||||||
|
- [DDNS for Route53](#ddns-for-route53)
|
||||||
|
- [Quickly Update DDNS Records](#quickly-update-ddns-records)
|
||||||
|
- [Install a New DDNS Service](#install-a-new-ddns-service)
|
||||||
|
- [Ansible Caddy Records](#ansible-caddy-records)
|
||||||
|
- [Development](#development)
|
||||||
|
- [Testing](#testing)
|
||||||
|
- [Building Container Image](#building-container-image)
|
||||||
|
|
||||||
|
This service will automatically keep ipv4 and ipv6 records updated in AWS
|
||||||
|
Route53.
|
||||||
|
|
||||||
|
**NOTE**: This requires the aws cli to be installed on each node with
|
||||||
|
credentials that can modify records in route53. See
|
||||||
|
[aws_iam](/active/aws_iam/aws_iam.md) and [aws_cli](/active/aws_cli/aws_cli.md)
|
||||||
|
|
||||||
|
## Quickly Update DDNS Records
|
||||||
|
|
||||||
|
In the event of a record change you can quickly trigger the ddns services with
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl start --all ddns*.service
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install a New DDNS Service
|
||||||
|
|
||||||
|
You need two files:
|
||||||
|
|
||||||
|
1. secrets/vars.yaml (with aws credentials)
|
||||||
|
2. secrets/records.yaml (with AWS records)
|
||||||
|
|
||||||
|
`secrets/vars.yaml` example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
aws:
|
||||||
|
access_key_id: key_here
|
||||||
|
secret_access_key: secret_here
|
||||||
|
```
|
||||||
|
|
||||||
|
`secrets/records.yaml` example:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
records:
|
||||||
|
- record: some.domain.com
|
||||||
|
hosted_zone_id: ABC123456789
|
||||||
|
- record: someother.domain.com
|
||||||
|
hosted_zone_id: ABC123456789
|
||||||
|
```
|
||||||
|
|
||||||
|
Then you'll need to pick a server responsible for keeping those records
|
||||||
|
updated. Whichever host you run the service on will also be the host which
|
||||||
|
provides the public IP. Choose the host accordingly if it will be updating a
|
||||||
|
public IP on behalf of another server, as the IPv6 address will not be correct.
|
||||||
|
|
||||||
|
Now you can install the DDNS service with something like:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
-l proxy \
|
||||||
|
active/container_ddns/install_ddns.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
See ansible playbook [install_ddns.yaml](/install_ddns.yaml)
|
||||||
|
|
||||||
|
It's recommended that you have multiple secret `foobar-records.yaml` files for
|
||||||
|
multiple servers. If you have a podman server, it'll have its own
|
||||||
|
`podman-records.yaml`. If you have a docker server, it'll have its own
|
||||||
|
`docker-records.yaml`. Etc. etc.
|
||||||
|
|
||||||
|
### Ansible Caddy Records
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ansible-playbook \
|
||||||
|
-i ansible/inventory.yaml \
|
||||||
|
-l caddy \
|
||||||
|
active/container_ddns/install_ddns.yaml \
|
||||||
|
-e "@active/container_ddns/secrets/records.yaml"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
### Testing
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ROUTE53_RECORD=test-ddns.reeseapps.com
|
||||||
|
export HOSTED_ZONE_ID=$(cat secrets/secret_vars.yaml | yq -r '.reeseapps_zone_id')
|
||||||
|
uv run update.py
|
||||||
|
```
|
||||||
|
|
||||||
|
### Building Container Image
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build
|
||||||
|
podman build -t gitea.reeseapps.com/services/ddns:latest -f ./Containerfile
|
||||||
|
podman push gitea.reeseapps.com/services/ddns:latest
|
||||||
|
|
||||||
|
# Run
|
||||||
|
export ROUTE53_RECORD=test-ddns.reeseapps.com
|
||||||
|
export HOSTED_ZONE_ID=$(cat secrets/secret_vars.yaml | yq -r '.reeseapps_zone_id')
|
||||||
|
podman run \
|
||||||
|
-e ROUTE53_RECORD=$ROUTE53_RECORD \
|
||||||
|
-e HOSTED_ZONE_ID=$HOSTED_ZONE_ID \
|
||||||
|
-e AWS_PROFILE=prod \
|
||||||
|
-v $HOME/.aws:/root/.aws:Z \
|
||||||
|
-it --rm \
|
||||||
|
gitea.reeseapps.com/services/ddns:latest
|
||||||
|
```
|
||||||
8
active/container_ddns/ddns.network
Normal file
8
active/container_ddns/ddns.network
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=DDNS
|
||||||
|
|
||||||
|
[Network]
|
||||||
|
IPv6=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
[Unit]
|
[Unit]
|
||||||
Description=Run ddns service every hour
|
Description=Run ddns.service every hour
|
||||||
|
|
||||||
[Timer]
|
[Timer]
|
||||||
OnCalendar=hourly
|
OnCalendar=hourly
|
||||||
59
active/container_ddns/install_ddns.yaml
Normal file
59
active/container_ddns/install_ddns.yaml
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
- name: Create DDNS Service
|
||||||
|
hosts: all
|
||||||
|
vars_files:
|
||||||
|
- secrets/vars.yaml
|
||||||
|
tasks:
|
||||||
|
- name: Create container build dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /tmp/ddns
|
||||||
|
state: directory
|
||||||
|
mode: '0755'
|
||||||
|
- name: Copy container build files
|
||||||
|
copy:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: /tmp/ddns/
|
||||||
|
with_items:
|
||||||
|
- uv.lock
|
||||||
|
- pyproject.toml
|
||||||
|
- update.py
|
||||||
|
- Containerfile
|
||||||
|
- secrets/records.yaml
|
||||||
|
- name: Run container build
|
||||||
|
shell:
|
||||||
|
cmd: podman build -t gitea.reeseapps.com/services/ddns:latest -f ./Containerfile
|
||||||
|
chdir: /tmp/ddns/
|
||||||
|
- name: Remove container build dir
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: /tmp/ddns
|
||||||
|
state: absent
|
||||||
|
- name: Copy ddns.network
|
||||||
|
template:
|
||||||
|
src: ddns.network
|
||||||
|
dest: /etc/containers/systemd/ddns.network
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Template DDNS Container Service
|
||||||
|
template:
|
||||||
|
src: ddns.container
|
||||||
|
dest: /etc/containers/systemd/ddns.container
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Template DDNS Container Timer
|
||||||
|
template:
|
||||||
|
src: ddns.timer
|
||||||
|
dest: /etc/systemd/system/ddns.timer
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: '0644'
|
||||||
|
- name: Reload ddns timer
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: ddns.timer
|
||||||
|
enabled: true
|
||||||
|
daemon_reload: true
|
||||||
|
- name: Run ddns service
|
||||||
|
ansible.builtin.systemd_service:
|
||||||
|
state: restarted
|
||||||
|
name: ddns.service
|
||||||
13
active/container_ddns/pyproject.toml
Normal file
13
active/container_ddns/pyproject.toml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[project]
|
||||||
|
name = "ddns"
|
||||||
|
version = "0.1.0"
|
||||||
|
description = "Add your description here"
|
||||||
|
readme = "README.md"
|
||||||
|
requires-python = ">=3.10"
|
||||||
|
dependencies = [
|
||||||
|
"boto3>=1.37.30",
|
||||||
|
"boto3-stubs[all]>=1.38.23",
|
||||||
|
"pytest>=8.3.5",
|
||||||
|
"pyyaml>=6.0.3",
|
||||||
|
"types-pyyaml>=6.0.12.20250915",
|
||||||
|
]
|
||||||
32
active/container_ddns/test_update.py
Normal file
32
active/container_ddns/test_update.py
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
import re
|
||||||
|
|
||||||
|
from update import get_ipv4, get_ipv6
|
||||||
|
|
||||||
|
regex_match_ipv4 = (
|
||||||
|
r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
|
||||||
|
)
|
||||||
|
|
||||||
|
regex_match_ipv6 = (
|
||||||
|
r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:)"
|
||||||
|
r"{1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:)"
|
||||||
|
r"{1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
|
||||||
|
r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:"
|
||||||
|
r"[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4})"
|
||||||
|
r"{0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9])"
|
||||||
|
r"{0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
|
||||||
|
r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_get_ipv4():
|
||||||
|
ip = get_ipv4()
|
||||||
|
assert re.match(
|
||||||
|
regex_match_ipv4,
|
||||||
|
ip
|
||||||
|
)
|
||||||
|
|
||||||
|
def test_get_ipv6():
|
||||||
|
ip = get_ipv6()
|
||||||
|
assert re.match(
|
||||||
|
regex_match_ipv6,
|
||||||
|
ip
|
||||||
|
)
|
||||||
173
active/container_ddns/update.py
Normal file
173
active/container_ddns/update.py
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
"""
|
||||||
|
export HOSTED_ZONE_ID=<aws hosted zone ID>
|
||||||
|
export ROUTE53_RECORD=something.mydomain.com
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
import yaml
|
||||||
|
import sys
|
||||||
|
from typing import TYPE_CHECKING, TypedDict
|
||||||
|
|
||||||
|
import boto3
|
||||||
|
|
||||||
|
try:
|
||||||
|
from yaml import CLoader as Loader
|
||||||
|
except ImportError:
|
||||||
|
from yaml import Loader # type: ignore
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
from mypy_boto3_route53 import Route53Client
|
||||||
|
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
|
||||||
|
datefmt="%Y-%m-%d %H:%M:%S",
|
||||||
|
)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
logger.setLevel(logging.INFO)
|
||||||
|
|
||||||
|
ROUTE53_RECORDS_FILE = os.getenv("ROUTE53_RECORDS_FILE")
|
||||||
|
GLOBAL_SKIP_IPV4 = os.getenv("GLOBAL_SKIP_IPV4", "false").lower() == "true"
|
||||||
|
GLOBAL_SKIP_IPV6 = os.getenv("GLOBAL_SKIP_IPV6", "false").lower() == "true"
|
||||||
|
|
||||||
|
|
||||||
|
class RecordType(TypedDict):
|
||||||
|
record: str
|
||||||
|
hosted_zone_id: str
|
||||||
|
skip_ipv4: bool | None
|
||||||
|
skip_ipv6: bool | None
|
||||||
|
|
||||||
|
|
||||||
|
class RecordYamlStruct(TypedDict):
|
||||||
|
records: list[RecordType]
|
||||||
|
|
||||||
|
|
||||||
|
def get_ipv4() -> str:
|
||||||
|
result = subprocess.run(["curl", "-4", "ifconfig.me"], capture_output=True)
|
||||||
|
return result.stdout.decode()
|
||||||
|
|
||||||
|
|
||||||
|
def get_ipv6() -> str:
|
||||||
|
result = subprocess.run(["curl", "-6", "ifconfig.me"], capture_output=True)
|
||||||
|
return result.stdout.decode()
|
||||||
|
|
||||||
|
|
||||||
|
def update_ipv4(hosted_zone_id: str, record: str, public_ipv4: str):
|
||||||
|
client: Route53Client = boto3.client("route53")
|
||||||
|
try:
|
||||||
|
logger.info("Calling upsert for ipv4.")
|
||||||
|
client.change_resource_record_sets(
|
||||||
|
HostedZoneId=hosted_zone_id,
|
||||||
|
ChangeBatch={
|
||||||
|
"Comment": "Update Public Addresses",
|
||||||
|
"Changes": [
|
||||||
|
{
|
||||||
|
"Action": "UPSERT",
|
||||||
|
"ResourceRecordSet": {
|
||||||
|
"Name": f"{record}",
|
||||||
|
"Type": "A",
|
||||||
|
"TTL": 300,
|
||||||
|
"ResourceRecords": [{"Value": public_ipv4}],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
logger.info(f"Successfully updated ipv4 for {record}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating ipv4 for {record}.")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
|
||||||
|
def update_ipv6(hosted_zone_id: str, record: str, public_ipv6: str):
|
||||||
|
client = boto3.client("route53")
|
||||||
|
try:
|
||||||
|
logger.info("Calling upsert for ipv6.")
|
||||||
|
client.change_resource_record_sets(
|
||||||
|
HostedZoneId=hosted_zone_id,
|
||||||
|
ChangeBatch={
|
||||||
|
"Comment": "Update Public Addresses",
|
||||||
|
"Changes": [
|
||||||
|
{
|
||||||
|
"Action": "UPSERT",
|
||||||
|
"ResourceRecordSet": {
|
||||||
|
"Name": f"{record}",
|
||||||
|
"Type": "AAAA",
|
||||||
|
"TTL": 300,
|
||||||
|
"ResourceRecords": [{"Value": public_ipv6}],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
logger.info(f"Successfully updated ipv6 for {record}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating ipv6 for {record}.")
|
||||||
|
raise e
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
if not ROUTE53_RECORDS_FILE:
|
||||||
|
logger.error("ROUTE53_RECORDS_FILE env var not found!")
|
||||||
|
exit(1)
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(ROUTE53_RECORDS_FILE) as f:
|
||||||
|
records_file_contents: RecordYamlStruct = yaml.load(f, Loader)
|
||||||
|
except FileNotFoundError as e:
|
||||||
|
logger.error(e)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if GLOBAL_SKIP_IPV4:
|
||||||
|
public_ipv4 = None
|
||||||
|
logger.warning("Globally skipping IPv4.")
|
||||||
|
else:
|
||||||
|
logger.info("Getting IPv4 address from ifconfig.me")
|
||||||
|
public_ipv4 = get_ipv4()
|
||||||
|
if not public_ipv4:
|
||||||
|
logger.error("Public IPv4 not found.")
|
||||||
|
exit(1)
|
||||||
|
logger.info(f"Public IPv4 is {public_ipv4}")
|
||||||
|
|
||||||
|
if GLOBAL_SKIP_IPV6:
|
||||||
|
public_ipv6 = None
|
||||||
|
logger.warning("Globally Skipping IPv6")
|
||||||
|
else:
|
||||||
|
logger.info("Getting IPv6 address from ifconfig.me")
|
||||||
|
public_ipv6 = get_ipv6()
|
||||||
|
if not public_ipv6:
|
||||||
|
logger.error("Public IPv6 not found.")
|
||||||
|
exit(1)
|
||||||
|
logger.info(f"Public IPv6 is {public_ipv6}")
|
||||||
|
|
||||||
|
for record in records_file_contents["records"]:
|
||||||
|
|
||||||
|
logger.info(f"Attempting to update {record['record']} from {record['hosted_zone_id']}.")
|
||||||
|
|
||||||
|
if record.get("skip_ipv4"):
|
||||||
|
logger.info(f"{record['record']} requested to skip IPv4")
|
||||||
|
elif GLOBAL_SKIP_IPV4 or not public_ipv4:
|
||||||
|
logger.info("Globally skipping IPv4")
|
||||||
|
else:
|
||||||
|
update_ipv4(
|
||||||
|
hosted_zone_id=record["hosted_zone_id"],
|
||||||
|
record=record["record"],
|
||||||
|
public_ipv4=public_ipv4,
|
||||||
|
)
|
||||||
|
|
||||||
|
if record.get("skip_ipv6"):
|
||||||
|
logger.info(f"{record['record']} requested to skip IPv6")
|
||||||
|
elif GLOBAL_SKIP_IPV6 or not public_ipv6:
|
||||||
|
logger.info("Globally skipping IPv6")
|
||||||
|
else:
|
||||||
|
update_ipv6(
|
||||||
|
hosted_zone_id=record["hosted_zone_id"],
|
||||||
|
record=record["record"],
|
||||||
|
public_ipv6=public_ipv6,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
5573
active/container_ddns/uv.lock
generated
Normal file
5573
active/container_ddns/uv.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
41
active/container_elk/.env
Normal file
41
active/container_elk/.env
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Project namespace (defaults to the current folder name if not set)
|
||||||
|
#COMPOSE_PROJECT_NAME=myproject
|
||||||
|
|
||||||
|
|
||||||
|
# Password for the 'elastic' user (at least 6 characters)
|
||||||
|
ELASTIC_PASSWORD=changeme
|
||||||
|
|
||||||
|
|
||||||
|
# Password for the 'kibana_system' user (at least 6 characters)
|
||||||
|
KIBANA_PASSWORD=changeme
|
||||||
|
|
||||||
|
|
||||||
|
# Version of Elastic products
|
||||||
|
STACK_VERSION=8.7.1
|
||||||
|
|
||||||
|
|
||||||
|
# Set the cluster name
|
||||||
|
CLUSTER_NAME=docker-cluster
|
||||||
|
|
||||||
|
|
||||||
|
# Set to 'basic' or 'trial' to automatically start the 30-day trial
|
||||||
|
LICENSE=basic
|
||||||
|
#LICENSE=trial
|
||||||
|
|
||||||
|
|
||||||
|
# Port to expose Elasticsearch HTTP API to the host
|
||||||
|
ES_PORT=9200
|
||||||
|
|
||||||
|
|
||||||
|
# Port to expose Kibana to the host
|
||||||
|
KIBANA_PORT=5601
|
||||||
|
|
||||||
|
|
||||||
|
# Increase or decrease based on the available host memory (in bytes)
|
||||||
|
ES_MEM_LIMIT=1073741824
|
||||||
|
KB_MEM_LIMIT=1073741824
|
||||||
|
LS_MEM_LIMIT=1073741824
|
||||||
|
|
||||||
|
|
||||||
|
# SAMPLE Predefined Key only to be used in POC environments
|
||||||
|
ENCRYPTION_KEY=c34d38b3a14956121ff2170e5030b471551370178f43e5626eec58b04a30fae2
|
||||||
219
active/container_elk/elk-compose.yaml
Normal file
219
active/container_elk/elk-compose.yaml
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
version: "3.8"
|
||||||
|
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
certs:
|
||||||
|
driver: local
|
||||||
|
esdata01:
|
||||||
|
driver: local
|
||||||
|
kibanadata:
|
||||||
|
driver: local
|
||||||
|
metricbeatdata01:
|
||||||
|
driver: local
|
||||||
|
filebeatdata01:
|
||||||
|
driver: local
|
||||||
|
logstashdata01:
|
||||||
|
driver: local
|
||||||
|
|
||||||
|
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
name: elastic
|
||||||
|
external: false
|
||||||
|
|
||||||
|
|
||||||
|
services:
|
||||||
|
setup:
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/elasticsearch/config/certs
|
||||||
|
user: "0"
|
||||||
|
command: >
|
||||||
|
bash -c '
|
||||||
|
if [ x${ELASTIC_PASSWORD} == x ]; then
|
||||||
|
echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
|
||||||
|
exit 1;
|
||||||
|
elif [ x${KIBANA_PASSWORD} == x ]; then
|
||||||
|
echo "Set the KIBANA_PASSWORD environment variable in the .env file";
|
||||||
|
exit 1;
|
||||||
|
fi;
|
||||||
|
if [ ! -f config/certs/ca.zip ]; then
|
||||||
|
echo "Creating CA";
|
||||||
|
bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
|
||||||
|
unzip config/certs/ca.zip -d config/certs;
|
||||||
|
fi;
|
||||||
|
if [ ! -f config/certs/certs.zip ]; then
|
||||||
|
echo "Creating certs";
|
||||||
|
echo -ne \
|
||||||
|
"instances:\n"\
|
||||||
|
" - name: es01\n"\
|
||||||
|
" dns:\n"\
|
||||||
|
" - es01\n"\
|
||||||
|
" - localhost\n"\
|
||||||
|
" ip:\n"\
|
||||||
|
" - 127.0.0.1\n"\
|
||||||
|
" - name: kibana\n"\
|
||||||
|
" dns:\n"\
|
||||||
|
" - kibana\n"\
|
||||||
|
" - localhost\n"\
|
||||||
|
" ip:\n"\
|
||||||
|
" - 127.0.0.1\n"\
|
||||||
|
> config/certs/instances.yml;
|
||||||
|
bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
|
||||||
|
unzip config/certs/certs.zip -d config/certs;
|
||||||
|
fi;
|
||||||
|
echo "Setting file permissions"
|
||||||
|
chown -R root:root config/certs;
|
||||||
|
find . -type d -exec chmod 750 \{\} \;;
|
||||||
|
find . -type f -exec chmod 640 \{\} \;;
|
||||||
|
echo "Waiting for Elasticsearch availability";
|
||||||
|
until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
|
||||||
|
echo "Setting kibana_system password";
|
||||||
|
until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
|
||||||
|
echo "All done!";
|
||||||
|
'
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
|
||||||
|
interval: 1s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
es01:
|
||||||
|
depends_on:
|
||||||
|
setup:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/elasticsearch/elasticsearch:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: elasticsearch
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/elasticsearch/config/certs
|
||||||
|
- esdata01:/usr/share/elasticsearch/data
|
||||||
|
ports:
|
||||||
|
- ${ES_PORT}:9200
|
||||||
|
environment:
|
||||||
|
- node.name=es01
|
||||||
|
- cluster.name=${CLUSTER_NAME}
|
||||||
|
- discovery.type=single-node
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- bootstrap.memory_lock=true
|
||||||
|
- xpack.security.enabled=true
|
||||||
|
- xpack.security.http.ssl.enabled=true
|
||||||
|
- xpack.security.http.ssl.key=certs/es01/es01.key
|
||||||
|
- xpack.security.http.ssl.certificate=certs/es01/es01.crt
|
||||||
|
- xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
|
||||||
|
- xpack.security.transport.ssl.enabled=true
|
||||||
|
- xpack.security.transport.ssl.key=certs/es01/es01.key
|
||||||
|
- xpack.security.transport.ssl.certificate=certs/es01/es01.crt
|
||||||
|
- xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
|
||||||
|
- xpack.security.transport.ssl.verification_mode=certificate
|
||||||
|
- xpack.license.self_generated.type=${LICENSE}
|
||||||
|
mem_limit: ${ES_MEM_LIMIT}
|
||||||
|
ulimits:
|
||||||
|
memlock:
|
||||||
|
soft: -1
|
||||||
|
hard: -1
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD-SHELL",
|
||||||
|
"curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
|
||||||
|
]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
kibana:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: kibana
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/kibana/config/certs
|
||||||
|
- kibanadata:/usr/share/kibana/data
|
||||||
|
ports:
|
||||||
|
- ${KIBANA_PORT}:5601
|
||||||
|
environment:
|
||||||
|
- SERVERNAME=kibana
|
||||||
|
- ELASTICSEARCH_HOSTS=https://es01:9200
|
||||||
|
- ELASTICSEARCH_USERNAME=kibana_system
|
||||||
|
- ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
|
||||||
|
- ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
|
||||||
|
- XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
- XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
- XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}
|
||||||
|
mem_limit: ${KB_MEM_LIMIT}
|
||||||
|
healthcheck:
|
||||||
|
test:
|
||||||
|
[
|
||||||
|
"CMD-SHELL",
|
||||||
|
"curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'",
|
||||||
|
]
|
||||||
|
interval: 10s
|
||||||
|
timeout: 10s
|
||||||
|
retries: 120
|
||||||
|
|
||||||
|
metricbeat01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
kibana:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/beats/metricbeat:${STACK_VERSION}
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/metricbeat/certs
|
||||||
|
- metricbeatdata01:/usr/share/metricbeat/data
|
||||||
|
- "./metricbeat.yaml:/usr/share/metricbeat/metricbeat.yml:ro"
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
- "/sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro"
|
||||||
|
- "/proc:/hostfs/proc:ro"
|
||||||
|
- "/:/hostfs:ro"
|
||||||
|
environment:
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
|
- KIBANA_HOSTS=http://kibana:5601
|
||||||
|
- LOGSTASH_HOSTS=http://logstash01:9600
|
||||||
|
|
||||||
|
filebeat01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/beats/filebeat:${STACK_VERSION}
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/filebeat/certs
|
||||||
|
- filebeatdata01:/usr/share/filebeat/data
|
||||||
|
- "./filebeat_ingest_data/:/usr/share/filebeat/ingest_data/"
|
||||||
|
- "./filebeat.yaml:/usr/share/filebeat/filebeat.yml:ro"
|
||||||
|
- "/var/lib/docker/containers:/var/lib/docker/containers:ro"
|
||||||
|
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||||
|
environment:
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
|
- KIBANA_HOSTS=http://kibana:5601
|
||||||
|
- LOGSTASH_HOSTS=http://logstash01:9600
|
||||||
|
|
||||||
|
logstash01:
|
||||||
|
depends_on:
|
||||||
|
es01:
|
||||||
|
condition: service_healthy
|
||||||
|
kibana:
|
||||||
|
condition: service_healthy
|
||||||
|
image: docker.elastic.co/logstash/logstash:${STACK_VERSION}
|
||||||
|
labels:
|
||||||
|
co.elastic.logs/module: logstash
|
||||||
|
user: root
|
||||||
|
volumes:
|
||||||
|
- certs:/usr/share/logstash/certs
|
||||||
|
- logstashdata01:/usr/share/logstash/data
|
||||||
|
- "./logstash_ingest_data/:/usr/share/logstash/ingest_data/"
|
||||||
|
- "./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro"
|
||||||
|
environment:
|
||||||
|
- xpack.monitoring.enabled=false
|
||||||
|
- ELASTIC_USER=elastic
|
||||||
|
- ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
|
||||||
|
- ELASTIC_HOSTS=https://es01:9200
|
||||||
14
active/container_elk/elk.md
Normal file
14
active/container_elk/elk.md
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Elk Stack
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
<https://www.elastic.co/blog/getting-started-with-the-elastic-stack-and-docker-compose>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Copy over the files
|
||||||
|
scp -rp active/container_elk/. elk:elk
|
||||||
|
# SSH into the host
|
||||||
|
ssh -t elk "cd elk ; bash --login"
|
||||||
|
# Run the services
|
||||||
|
docker compose -f elk-compose.yaml up
|
||||||
|
```
|
||||||
29
active/container_elk/filebeat.yaml
Normal file
29
active/container_elk/filebeat.yaml
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
filebeat.inputs:
|
||||||
|
- type: filestream
|
||||||
|
id: default-filestream
|
||||||
|
paths:
|
||||||
|
- ingest_data/*.log
|
||||||
|
|
||||||
|
|
||||||
|
filebeat.autodiscover:
|
||||||
|
providers:
|
||||||
|
- type: docker
|
||||||
|
hints.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
processors:
|
||||||
|
- add_docker_metadata: ~
|
||||||
|
|
||||||
|
|
||||||
|
setup.kibana:
|
||||||
|
host: ${KIBANA_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
|
||||||
|
|
||||||
|
output.elasticsearch:
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl.enabled: true
|
||||||
|
ssl.certificate_authorities: "certs/ca/ca.crt"
|
||||||
24
active/container_elk/logstash.conf
Normal file
24
active/container_elk/logstash.conf
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
input {
|
||||||
|
file {
|
||||||
|
#https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html
|
||||||
|
#default is TAIL which assumes more data will come into the file.
|
||||||
|
#change to mode => "read" if the file is a compelte file. by default, the file will be removed once reading is complete -- backup your files if you need them.
|
||||||
|
mode => "tail"
|
||||||
|
path => "/usr/share/logstash/ingest_data/*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
filter {
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
output {
|
||||||
|
elasticsearch {
|
||||||
|
index => "logstash-%{+YYYY.MM.dd}"
|
||||||
|
hosts=> "${ELASTIC_HOSTS}"
|
||||||
|
user=> "${ELASTIC_USER}"
|
||||||
|
password=> "${ELASTIC_PASSWORD}"
|
||||||
|
cacert=> "certs/ca/ca.crt"
|
||||||
|
}
|
||||||
|
}
|
||||||
62
active/container_elk/metricbeat.yaml
Normal file
62
active/container_elk/metricbeat.yaml
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
metricbeat.config.modules:
|
||||||
|
path: ${path.config}/modules.d/*.yml
|
||||||
|
reload.enabled: false
|
||||||
|
|
||||||
|
|
||||||
|
metricbeat.modules:
|
||||||
|
- module: elasticsearch
|
||||||
|
xpack.enabled: true
|
||||||
|
period: 10s
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
ssl.certificate_authorities: "certs/ca/ca.crt"
|
||||||
|
ssl.certificate: "certs/es01/es01.crt"
|
||||||
|
ssl.key: "certs/es01/es01.key"
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
- module: logstash
|
||||||
|
xpack.enabled: true
|
||||||
|
period: 10s
|
||||||
|
hosts: ${LOGSTASH_HOSTS}
|
||||||
|
|
||||||
|
|
||||||
|
- module: kibana
|
||||||
|
metricsets:
|
||||||
|
- stats
|
||||||
|
period: 10s
|
||||||
|
hosts: ${KIBANA_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
xpack.enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
- module: docker
|
||||||
|
metricsets:
|
||||||
|
- "container"
|
||||||
|
- "cpu"
|
||||||
|
- "diskio"
|
||||||
|
- "healthcheck"
|
||||||
|
- "info"
|
||||||
|
#- "image"
|
||||||
|
- "memory"
|
||||||
|
- "network"
|
||||||
|
hosts: ["unix:///var/run/docker.sock"]
|
||||||
|
period: 10s
|
||||||
|
enabled: true
|
||||||
|
|
||||||
|
|
||||||
|
processors:
|
||||||
|
- add_host_metadata: ~
|
||||||
|
- add_docker_metadata: ~
|
||||||
|
|
||||||
|
|
||||||
|
output.elasticsearch:
|
||||||
|
hosts: ${ELASTIC_HOSTS}
|
||||||
|
username: ${ELASTIC_USER}
|
||||||
|
password: ${ELASTIC_PASSWORD}
|
||||||
|
ssl:
|
||||||
|
certificate: "certs/es01/es01.crt"
|
||||||
|
certificate_authorities: "certs/ca/ca.crt"
|
||||||
|
key: "certs/es01/es01.key"
|
||||||
48
active/container_gitea/compose/compose.yaml
Normal file
48
active/container_gitea/compose/compose.yaml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
services:
|
||||||
|
gitea:
|
||||||
|
image: docker.gitea.com/gitea:1.25-rootless
|
||||||
|
container_name: gitea
|
||||||
|
environment:
|
||||||
|
- GITEA__database__DB_TYPE=postgres
|
||||||
|
- GITEA__database__HOST=postgres:5432
|
||||||
|
- GITEA__database__NAME=gitea
|
||||||
|
- GITEA__database__USER=gitea
|
||||||
|
- GITEA__database__PASSWD=gitea
|
||||||
|
security_opt:
|
||||||
|
- "label=disable"
|
||||||
|
restart: always
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /home/gitea/gitea_data:/data:Z
|
||||||
|
- /home/gitea/gitea_etc:/etc/gitea:Z
|
||||||
|
- /home/gitea/gitea_custom:/var/lib/gitea/custom:Z
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
ports:
|
||||||
|
- "3000:3000"
|
||||||
|
- "2222:2222"
|
||||||
|
depends_on:
|
||||||
|
- postgres
|
||||||
|
labels:
|
||||||
|
- "io.containers.autoupdate=registry"
|
||||||
|
|
||||||
|
postgres:
|
||||||
|
image: docker.io/library/postgres:15
|
||||||
|
container_name: postgres
|
||||||
|
security_opt:
|
||||||
|
- "label=disable"
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
- POSTGRES_USER=gitea
|
||||||
|
- POSTGRES_PASSWORD=gitea
|
||||||
|
- POSTGRES_DB=gitea
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /home/gitea/gitea_postgres:/var/lib/postgresql/data:Z
|
||||||
|
labels:
|
||||||
|
- "io.containers.autoupdate=registry"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
gitea:
|
||||||
|
enable_ipv6: true
|
||||||
41
active/container_gitea/gitea-compose.yaml
Normal file
41
active/container_gitea/gitea-compose.yaml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
networks:
|
||||||
|
gitea:
|
||||||
|
external: false
|
||||||
|
|
||||||
|
services:
|
||||||
|
server:
|
||||||
|
image: docker.gitea.com/gitea:1.25.3
|
||||||
|
container_name: gitea
|
||||||
|
environment:
|
||||||
|
- USER_UID=1001
|
||||||
|
- USER_GID=1001
|
||||||
|
- GITEA__database__DB_TYPE=postgres
|
||||||
|
- GITEA__database__HOST=db:5432
|
||||||
|
- GITEA__database__NAME=gitea
|
||||||
|
- GITEA__database__USER=gitea
|
||||||
|
- GITEA__database__PASSWD=gitea
|
||||||
|
restart: always
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /srv/gitea-data/data:/data
|
||||||
|
- /srv/gitea-data/custom:/var/lib/gitea/custom
|
||||||
|
- /etc/timezone:/etc/timezone:ro
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
ports:
|
||||||
|
- "3000:3000"
|
||||||
|
- "22:22"
|
||||||
|
depends_on:
|
||||||
|
- db
|
||||||
|
|
||||||
|
db:
|
||||||
|
image: docker.io/library/postgres:15
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
- POSTGRES_USER=gitea
|
||||||
|
- POSTGRES_PASSWORD=gitea
|
||||||
|
- POSTGRES_DB=gitea
|
||||||
|
networks:
|
||||||
|
- gitea
|
||||||
|
volumes:
|
||||||
|
- /srv/gitea-db/postgres:/var/lib/postgresql/data
|
||||||
238
active/container_gitea/gitea.md
Normal file
238
active/container_gitea/gitea.md
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
# Gitea
|
||||||
|
|
||||||
|
- [Gitea](#gitea)
|
||||||
|
- [Gitea on Docker](#gitea-on-docker)
|
||||||
|
- [Gitea on Rootless Podman](#gitea-on-rootless-podman)
|
||||||
|
- [A note on directories](#a-note-on-directories)
|
||||||
|
- [Create the gitea user](#create-the-gitea-user)
|
||||||
|
- [Convert Compose to Quadlet](#convert-compose-to-quadlet)
|
||||||
|
- [Install Quadlets](#install-quadlets)
|
||||||
|
- [Upgrade](#upgrade)
|
||||||
|
- [Editing Gitea Config](#editing-gitea-config)
|
||||||
|
- [Gitea Runners](#gitea-runners)
|
||||||
|
- [Firewall Rules](#firewall-rules)
|
||||||
|
- [Install](#install)
|
||||||
|
- [Cache Cleanup](#cache-cleanup)
|
||||||
|
- [Email Notifications](#email-notifications)
|
||||||
|
|
||||||
|
## Gitea on Docker
|
||||||
|
|
||||||
|
<https://docs.gitea.com/installation/install-with-docker>
|
||||||
|
|
||||||
|
Prereqs
|
||||||
|
|
||||||
|
1. Change the default SSH port for your server to 2022 (or something similar).
|
||||||
|
2. Allow SSH to bind to that port: `semanage port -a -t ssh_port_t -p tcp 2022`
|
||||||
|
3. Allow 2022 on the firewall: `firewall-cmd --add-port=2022/tcp --permanent && firewall-cmd --reload`
|
||||||
|
4. Mount data dirs at `/srv/gitea-data` and `/srv/gitea-db`
|
||||||
|
5. Create a gitea user and update gitea-compose.yaml with the correct UID
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_gitea/gitea-compose.yaml gitea:
|
||||||
|
docker compose -f gitea-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Gitea on Rootless Podman
|
||||||
|
|
||||||
|
### A note on directories
|
||||||
|
|
||||||
|
```bash
|
||||||
|
2025/07/30 16:49:12 cmd/web.go:116:showWebStartupMessage() [I] * AppPath: /usr/local/bin/gitea
|
||||||
|
2025/07/30 16:49:12 cmd/web.go:117:showWebStartupMessage() [I] * WorkPath: /var/lib/gitea
|
||||||
|
2025/07/30 16:49:12 cmd/web.go:118:showWebStartupMessage() [I] * CustomPath: /var/lib/gitea/custom
|
||||||
|
2025/07/30 16:49:12 cmd/web.go:119:showWebStartupMessage() [I] * ConfigFile: /etc/gitea/app.ini
|
||||||
|
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:176:initAttachments() [I] Initialising Attachment storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/attachments
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:166:initAvatars() [I] Initialising Avatar storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/avatars
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:192:initRepoAvatars() [I] Initialising Repository Avatar storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/repo-avatars
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:198:initRepoArchives() [I] Initialising Repository Archive storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/repo-archive
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:208:initPackages() [I] Initialising Packages storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/packages
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:219:initActions() [I] Initialising Actions storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/actions_log
|
||||||
|
2025/07/30 16:49:12 modules/storage/storage.go:223:initActions() [I] Initialising ActionsArtifacts storage with type: local
|
||||||
|
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/actions_artifacts
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create the gitea user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
useradd gitea
|
||||||
|
loginctl enable-linger $(id -u gitea)
|
||||||
|
systemctl --user --machine=gitea@.host enable podman-restart
|
||||||
|
systemctl --user --machine=gitea@.host enable --now podman.socket
|
||||||
|
su -l gitea
|
||||||
|
mkdir -p .config/containers/systemd
|
||||||
|
mkdir data config postgres
|
||||||
|
exit
|
||||||
|
```
|
||||||
|
|
||||||
|
### Convert Compose to Quadlet
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run this in Homelab, not on the server.
|
||||||
|
mkdir $(pwd)/active/container_gitea/quadlets
|
||||||
|
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--network none \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_gitea/compose:$(pwd)/active/container_gitea/compose:z \
|
||||||
|
-v $(pwd)/active/container_gitea/quadlets:$(pwd)/active/container_gitea/quadlets:z \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f $(pwd)/active/container_gitea/quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose $(pwd)/active/container_gitea/compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
scp -r $(pwd)/active/container_gitea/quadlets/. 3dserver:/home/gitea/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install Quadlets
|
||||||
|
|
||||||
|
First, set up the volumes needed by the container.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Enter the container namespace
|
||||||
|
podman unshare
|
||||||
|
|
||||||
|
# Create the volumes
|
||||||
|
mkdir gitea_data
|
||||||
|
chown -R 1000:1000 gitea_data
|
||||||
|
mkdir gitea_etc
|
||||||
|
chown -R 1000:1000 gitea_etc
|
||||||
|
exit
|
||||||
|
```
|
||||||
|
|
||||||
|
Now launch the service. The first user you register will be the admin.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Create a systemctl viable shell
|
||||||
|
machinectl shell gitea@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart gitea postgres
|
||||||
|
# Enables auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upgrade
|
||||||
|
|
||||||
|
1. Check [the blog](https://blog.gitea.com/) for any breaking changes.
|
||||||
|
2. Update the `compose.yaml` with any needed changes
|
||||||
|
3. [Regenerate the quadlets](#convert-compose-to-quadlet)
|
||||||
|
4. Upload the new quadlets and restart the service
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Upload quadlets and restart
|
||||||
|
export PODMAN_SERVER=3dserver
|
||||||
|
scp -r active/container_gitea/quadlets/. $PODMAN_SERVER:/home/gitea/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R gitea:gitea /home/gitea/.config/containers/systemd/
|
||||||
|
|
||||||
|
ssh $PODMAN_SERVER
|
||||||
|
machinectl shell gitea@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart gitea postgres
|
||||||
|
```
|
||||||
|
|
||||||
|
### Editing Gitea Config
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Use podman unshare to work within the container's namespace
|
||||||
|
podman unshare vim ~/gitea_data/gitea/conf/app.ini
|
||||||
|
```
|
||||||
|
|
||||||
|
## Gitea Runners
|
||||||
|
|
||||||
|
<https://docs.gitea.com/next/usage/actions/act-runner/#install-with-the-docker-image>
|
||||||
|
|
||||||
|
### Firewall Rules
|
||||||
|
|
||||||
|
Since our runner will be contacting our public IP, we need to add a firewall rule to allow
|
||||||
|
traffic from our DMZ network to our DMZ network. Do this in Unifi or whatever equivalent
|
||||||
|
you have.
|
||||||
|
|
||||||
|
### Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export GITEA_TOKEN=
|
||||||
|
docker run \
|
||||||
|
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||||
|
-e GITEA_INSTANCE_URL=https://gitea.reeseapps.com \
|
||||||
|
-e GITEA_RUNNER_REGISTRATION_TOKEN=$GITEA_TOKEN \
|
||||||
|
-e GITEA_RUNNER_NAME=gitea_runner \
|
||||||
|
--restart always \
|
||||||
|
--name gitea_runner \
|
||||||
|
-d docker.io/gitea/act_runner:latest
|
||||||
|
```
|
||||||
|
|
||||||
|
### Cache Cleanup
|
||||||
|
|
||||||
|
Each org or project with a package registry will have its own cleanup rules. For example,
|
||||||
|
services -> settings -> Packages -> Add Cleanup Rule will allow you to create a cleanup
|
||||||
|
rule for packages stored under the "services" org. These cleanup rules should run automatically.
|
||||||
|
|
||||||
|
You'll need to enable `cron` and `cron.cleanup_packages` in the app.ini (/data/gitea/conf).
|
||||||
|
|
||||||
|
Cron: <https://docs.gitea.com/administration/config-cheat-sheet#cron-cron>
|
||||||
|
|
||||||
|
Package Cleanup: <https://docs.gitea.com/1.19/administration/config-cheat-sheet#cron---cleanup-hook_task-table-croncleanup_hook_task_table>
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[cron]
|
||||||
|
ENABLED = true
|
||||||
|
RUN_AT_START = true
|
||||||
|
NOTICE_ON_SUCCESS = true
|
||||||
|
SCHEDULE = @midnight
|
||||||
|
|
||||||
|
[cron.cleanup_packages]
|
||||||
|
ENABLED = true
|
||||||
|
RUN_AT_START = true
|
||||||
|
SCHEDULE = @midnight
|
||||||
|
NOTICE_ON_SUCCESS = true
|
||||||
|
```
|
||||||
|
|
||||||
|
On the other hand, the docker builder cache will balloon out of control over time. The gitea
|
||||||
|
docker runner is handled outside of Gitea's context, so you'll need to clean it up yourself.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check used system resources
|
||||||
|
docker system df
|
||||||
|
```
|
||||||
|
|
||||||
|
You should run something like this on a schedule:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Prune the builder cache
|
||||||
|
docker builder prune -a
|
||||||
|
```
|
||||||
|
|
||||||
|
To run it every day at noon: `crontab -e`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
dnf install cronie cronie-anacron
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
0 12 * * * yes | docker builder prune -a
|
||||||
|
0 12 * * * docker image prune -a -f
|
||||||
|
```
|
||||||
|
|
||||||
|
## Email Notifications
|
||||||
|
|
||||||
|
In `/data/gitea/conf/app.ini` add (yes, the `` around the password matters):
|
||||||
|
|
||||||
|
```conf
|
||||||
|
[mailer]
|
||||||
|
ENABLED = true
|
||||||
|
FROM = gitea@reeseapps.com
|
||||||
|
PROTOCOL = smtps
|
||||||
|
SMTP_ADDR = email-smtp.us-east-1.amazonaws.com
|
||||||
|
SMTP_PORT = 465
|
||||||
|
USER = ABC123
|
||||||
|
PASSWD = `ABC123...`
|
||||||
|
```
|
||||||
22
active/container_gitea/quadlets/gitea.container
Normal file
22
active/container_gitea/quadlets/gitea.container
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
[Unit]
|
||||||
|
Requires=postgres.service
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
AutoUpdate=registry
|
||||||
|
ContainerName=gitea
|
||||||
|
Environment=GITEA__database__DB_TYPE=postgres GITEA__database__HOST=postgres:5432 GITEA__database__NAME=gitea GITEA__database__USER=gitea GITEA__database__PASSWD=gitea
|
||||||
|
Image=docker.gitea.com/gitea:1.25-rootless
|
||||||
|
Network=gitea.network
|
||||||
|
PublishPort=3000:3000
|
||||||
|
PublishPort=2222:2222
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/home/gitea/gitea_data:/data:Z
|
||||||
|
Volume=/home/gitea/gitea_etc:/etc/gitea:Z
|
||||||
|
Volume=/home/gitea/gitea_custom:/var/lib/gitea/custom:Z
|
||||||
|
Volume=/etc/localtime:/etc/localtime:ro
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
5
active/container_gitea/quadlets/gitea.network
Normal file
5
active/container_gitea/quadlets/gitea.network
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[Network]
|
||||||
|
IPv6=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
14
active/container_gitea/quadlets/postgres.container
Normal file
14
active/container_gitea/quadlets/postgres.container
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[Container]
|
||||||
|
AutoUpdate=registry
|
||||||
|
ContainerName=postgres
|
||||||
|
Environment=POSTGRES_USER=gitea POSTGRES_PASSWORD=gitea POSTGRES_DB=gitea
|
||||||
|
Image=docker.io/library/postgres:15
|
||||||
|
Network=gitea.network
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/home/gitea/gitea_postgres:/var/lib/postgresql/data:Z
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
16
active/container_gitlab/gitlab-compose.yaml
Normal file
16
active/container_gitlab/gitlab-compose.yaml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
services:
|
||||||
|
gitlab:
|
||||||
|
image: gitlab/gitlab-ce:18.7.3-ce.0
|
||||||
|
container_name: gitlab
|
||||||
|
restart: always
|
||||||
|
hostname: 'gitlab.example.com'
|
||||||
|
ports:
|
||||||
|
- '80:80'
|
||||||
|
- '443:443'
|
||||||
|
- '22:22'
|
||||||
|
volumes:
|
||||||
|
- '$GITLAB_HOME/gitlab.rb:/etc/gitlab/gitlab.rb:ro'
|
||||||
|
- '$GITLAB_HOME/config:/etc/gitlab'
|
||||||
|
- '$GITLAB_HOME/logs:/var/log/gitlab'
|
||||||
|
- '$GITLAB_HOME/data:/var/opt/gitlab'
|
||||||
|
shm_size: '256m'
|
||||||
66
active/container_gitlab/gitlab.md
Normal file
66
active/container_gitlab/gitlab.md
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Gitlab
|
||||||
|
|
||||||
|
## Docker Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Change the server's SSH port in /etc/ssh/sshd_config
|
||||||
|
Port = 2424
|
||||||
|
|
||||||
|
# Then tell selinux about it
|
||||||
|
semanage port -a -t ssh_port_t -p tcp 2424
|
||||||
|
# And add the firewall rule
|
||||||
|
firewall-cmd --add-port=2424/tcp --permanent
|
||||||
|
firewall-cmd --reload
|
||||||
|
# Reload SSH
|
||||||
|
systemctl restart sshd
|
||||||
|
|
||||||
|
# Make a Gitlab directory
|
||||||
|
mkdir -p /srv/gitlab
|
||||||
|
|
||||||
|
# Add the following to .bashrc (used in the compose file)
|
||||||
|
export GITLAB_HOME=/srv/gitlab
|
||||||
|
```
|
||||||
|
|
||||||
|
Create your `gitlab-compose.yaml`. See the file in this repo for an example.
|
||||||
|
|
||||||
|
Also create the file `secrets/gitlab.rb` with your configuration. Should look something like this:
|
||||||
|
|
||||||
|
```ruby
|
||||||
|
# Add any other gitlab.rb configuration here, each on its own line
|
||||||
|
external_url 'https://gitlab.reeseapps.com'
|
||||||
|
nginx['listen_port'] = 80
|
||||||
|
nginx['listen_https'] = false
|
||||||
|
nginx['proxy_set_headers'] = {
|
||||||
|
"X-Forwarded-Proto" => "https",
|
||||||
|
"X-Forwarded-Ssl" => "on",
|
||||||
|
"Host" => "gitlab.mydomain.de",
|
||||||
|
"X-Real-IP" => "$$remote_addr",
|
||||||
|
"X-Forwarded-For" => "$$proxy_add_x_forwarded_for",
|
||||||
|
"Upgrade" => "$$http_upgrade",
|
||||||
|
"Connection" => "$$connection_upgrade"
|
||||||
|
}
|
||||||
|
gitlab_rails['smtp_enable'] = true
|
||||||
|
gitlab_rails['smtp_address'] = "email-smtp.us-east-1.amazonaws.com"
|
||||||
|
gitlab_rails['smtp_port'] = 465
|
||||||
|
gitlab_rails['smtp_user_name'] = ""
|
||||||
|
gitlab_rails['smtp_password'] = ""
|
||||||
|
gitlab_rails['smtp_domain'] = ""
|
||||||
|
gitlab_rails['smtp_authentication'] = "login"
|
||||||
|
gitlab_rails['smtp_ssl'] = true
|
||||||
|
gitlab_rails['smtp_force_ssl'] = true
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy `gitlab.rb` and `gitlab-compose.yaml` to your server:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_gitlab/gitlab-compose.yaml gitlab:
|
||||||
|
scp active/container_gitlab/secrets/gitlab.rb gitlab:/srv/gitlab
|
||||||
|
```
|
||||||
|
|
||||||
|
Then docker compose up:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose -f gitlab-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
The initial username is root and the password will be at `/srv/gitlab/config/initial_root_password`.
|
||||||
@@ -13,15 +13,14 @@ podman run \
|
|||||||
--name=grafana \
|
--name=grafana \
|
||||||
--volume grafana-storage:/var/lib/grafana \
|
--volume grafana-storage:/var/lib/grafana \
|
||||||
--network=systemd-graphite \
|
--network=systemd-graphite \
|
||||||
grafana/grafana-enterprise > podman/incubating/grafana/grafana.container
|
grafana/grafana-enterprise > active/container_grafana/grafana.container
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
Copy the graphite.container and graphite.network file to the server you want to run it on
|
Copy the graphite.container and graphite.network file to the server you want to run it on
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PODMAN_SERVER=
|
export PODMAN_SERVER=
|
||||||
scp podman/incubating/grafana/grafana.container $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_grafana/grafana.container $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER systemctl daemon-reload
|
ssh $PODMAN_SERVER systemctl daemon-reload
|
||||||
ssh $PODMAN_SERVER systemctl enable --now grafana.service
|
ssh $PODMAN_SERVER systemctl enable --now grafana.service
|
||||||
@@ -7,7 +7,7 @@
|
|||||||
```bash
|
```bash
|
||||||
# Generate the network
|
# Generate the network
|
||||||
podman run ghcr.io/containers/podlet --description Graphite \
|
podman run ghcr.io/containers/podlet --description Graphite \
|
||||||
podman network create --ipv6 graphite > podman/incubating/graphite/graphite.network
|
podman network create --ipv6 graphite > active/container_graphite/graphite.network
|
||||||
|
|
||||||
# Generate the systemd container service
|
# Generate the systemd container service
|
||||||
podman run ghcr.io/containers/podlet --description Graphite \
|
podman run ghcr.io/containers/podlet --description Graphite \
|
||||||
@@ -23,15 +23,15 @@ podman run \
|
|||||||
-v graphite_configs:/opt/graphite/conf \
|
-v graphite_configs:/opt/graphite/conf \
|
||||||
-v graphite_data:/opt/graphite/storage \
|
-v graphite_data:/opt/graphite/storage \
|
||||||
-v graphite_statsd_config:/opt/statsd/config \
|
-v graphite_statsd_config:/opt/statsd/config \
|
||||||
ghcr.io/deniszh/graphite-statsd > podman/incubating/graphite/graphite.container
|
ghcr.io/deniszh/graphite-statsd > active/container_graphite/graphite.container
|
||||||
```
|
```
|
||||||
|
|
||||||
Copy the graphite.container and graphite.network file to the server you want to run it on
|
Copy the graphite.container and graphite.network file to the server you want to run it on
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PODMAN_SERVER=
|
export PODMAN_SERVER=
|
||||||
scp podman/incubating/graphite/graphite.network $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_graphite/graphite.network $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
scp podman/incubating/graphite/graphite.container $PODMAN_SERVER:/etc/containers/systemd/
|
scp active/container_graphite/graphite.container $PODMAN_SERVER:/etc/containers/systemd/
|
||||||
|
|
||||||
ssh $PODMAN_SERVER systemctl daemon-reload
|
ssh $PODMAN_SERVER systemctl daemon-reload
|
||||||
ssh $PODMAN_SERVER systemctl start graphite.network
|
ssh $PODMAN_SERVER systemctl start graphite.network
|
||||||
3
active/container_immich/compose/README.md
Normal file
3
active/container_immich/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
79
active/container_immich/compose/compose.yaml
Normal file
79
active/container_immich/compose/compose.yaml
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
#
|
||||||
|
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
|
||||||
|
#
|
||||||
|
# Make sure to use the docker-compose.yml of the current release:
|
||||||
|
#
|
||||||
|
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
||||||
|
#
|
||||||
|
# The compose file on main may not be compatible with the latest release.
|
||||||
|
|
||||||
|
services:
|
||||||
|
immich-server:
|
||||||
|
container_name: immich_server
|
||||||
|
image: ghcr.io/immich-app/immich-server:v2.3.1
|
||||||
|
# extends:
|
||||||
|
# file: hwaccel.transcoding.yml
|
||||||
|
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||||
|
volumes:
|
||||||
|
- /home/immich/library:/data:Z
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
env_file:
|
||||||
|
- .env
|
||||||
|
ports:
|
||||||
|
- '2283:2283'
|
||||||
|
depends_on:
|
||||||
|
- redis
|
||||||
|
- database
|
||||||
|
restart: always
|
||||||
|
healthcheck:
|
||||||
|
disable: false
|
||||||
|
networks:
|
||||||
|
- immich
|
||||||
|
|
||||||
|
immich-machine-learning:
|
||||||
|
container_name: immich_machine_learning
|
||||||
|
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
|
||||||
|
# Example tag: release-cuda
|
||||||
|
image: ghcr.io/immich-app/immich-machine-learning:release
|
||||||
|
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
|
||||||
|
# file: hwaccel.ml.yml
|
||||||
|
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
|
||||||
|
volumes:
|
||||||
|
- /home/immich/model-cache:/cache:Z
|
||||||
|
env_file:
|
||||||
|
- .env
|
||||||
|
restart: always
|
||||||
|
healthcheck:
|
||||||
|
disable: false
|
||||||
|
networks:
|
||||||
|
- immich
|
||||||
|
|
||||||
|
redis:
|
||||||
|
container_name: immich_redis
|
||||||
|
image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
|
||||||
|
healthcheck:
|
||||||
|
test: redis-cli ping || exit 1
|
||||||
|
restart: always
|
||||||
|
networks:
|
||||||
|
- immich
|
||||||
|
|
||||||
|
database:
|
||||||
|
container_name: immich_postgres
|
||||||
|
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
|
||||||
|
environment:
|
||||||
|
POSTGRES_PASSWORD: postgres
|
||||||
|
POSTGRES_USER: postgres
|
||||||
|
POSTGRES_DB: immich
|
||||||
|
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||||
|
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
|
||||||
|
# DB_STORAGE_TYPE: 'HDD'
|
||||||
|
volumes:
|
||||||
|
- /home/immich/postgres:/var/lib/postgresql/data:Z
|
||||||
|
shm_size: 128mb
|
||||||
|
restart: always
|
||||||
|
networks:
|
||||||
|
- immich
|
||||||
|
|
||||||
|
networks:
|
||||||
|
immich:
|
||||||
|
enable_ipv6: true
|
||||||
206
active/container_immich/immich.md
Normal file
206
active/container_immich/immich.md
Normal file
@@ -0,0 +1,206 @@
|
|||||||
|
# Podman immich
|
||||||
|
|
||||||
|
- [Podman immich](#podman-immich)
|
||||||
|
- [Setup immich Project](#setup-immich-project)
|
||||||
|
- [Install immich with Docker](#install-immich-with-docker)
|
||||||
|
- [Install immich with Rootless Podman](#install-immich-with-rootless-podman)
|
||||||
|
- [Create the immich user](#create-the-immich-user)
|
||||||
|
- [Write the immich compose spec](#write-the-immich-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert immich compose spec to quadlets](#convert-immich-compose-spec-to-quadlets)
|
||||||
|
- [Create any container-mounted directories](#create-any-container-mounted-directories)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose immich](#expose-immich)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup immich](#backup-immich)
|
||||||
|
- [Upgrade immich](#upgrade-immich)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Upload Images in Bulk](#upload-images-in-bulk)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup immich Project
|
||||||
|
|
||||||
|
- [x] Copy and rename this folder to active/container_immich
|
||||||
|
- [x] Find and replace immich with the name of the service.
|
||||||
|
- [x] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install immich with Docker
|
||||||
|
|
||||||
|
<https://docs.immich.app/install/docker-compose/>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp active/container_immich/release-compose.yaml immich:
|
||||||
|
scp active/container_immich/release-env immich:.env
|
||||||
|
|
||||||
|
mkdir /srv/immich
|
||||||
|
docker compose -f release-compose.yaml up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install immich with Rootless Podman
|
||||||
|
|
||||||
|
### Create the immich user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd immich
|
||||||
|
loginctl enable-linger $(id -u immich)
|
||||||
|
systemctl --user --machine=immich@.host enable podman-restart
|
||||||
|
systemctl --user --machine=immich@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/immich/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the immich compose spec
|
||||||
|
|
||||||
|
1. Pull down the immich files
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pull the compose file
|
||||||
|
wget -O active/container_immich/release-compose.yaml https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
||||||
|
|
||||||
|
# Pull the .env file
|
||||||
|
wget -O active/container_immich/release-env https://github.com/immich-app/immich/releases/latest/download/example.env
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Edit the compose.yaml. Replace all environment variables with their correct values.
|
||||||
|
3. Edit the .env file. Make sure to match exactly what is in the compose file.
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/immich/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert immich compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_immich/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_immich/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=3dserver
|
||||||
|
scp -r active/container_immich/quadlets/. $PODMAN_SERVER:/home/immich/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R immich:immich /home/immich/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create any container-mounted directories
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell immich@
|
||||||
|
podman unshare
|
||||||
|
mkdir library postgres model-cache
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell immich@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart immich-server.service immich-machine-learning.service
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose immich
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup immich
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade immich
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-immich-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER$:/home/immich/.config/containers/systemd/
|
||||||
|
ssh immich systemctl --user daemon-reload
|
||||||
|
ssh immich systemctl --user restart immich
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upload Images in Bulk
|
||||||
|
|
||||||
|
<https://docs.immich.app/features/command-line-interface/>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install the CLI
|
||||||
|
npm i -g @immich/cli
|
||||||
|
|
||||||
|
# immich login [url] [key]
|
||||||
|
immich login http://192.168.1.216:2283/api <key here>
|
||||||
|
|
||||||
|
# Check the upload
|
||||||
|
immich upload --dry-run --recursive directory/
|
||||||
|
|
||||||
|
# Upload
|
||||||
|
immich upload --recursive directory/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user immich is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel immich
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
26
active/container_immich/quadlets/.env
Normal file
26
active/container_immich/quadlets/.env
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
|
||||||
|
|
||||||
|
# The location where your uploaded files are stored
|
||||||
|
UPLOAD_LOCATION=/home/immich/library
|
||||||
|
|
||||||
|
# The location where your database files are stored. Network shares are not supported for the database
|
||||||
|
DB_DATA_LOCATION=/home/immich/postgres
|
||||||
|
|
||||||
|
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
|
||||||
|
TZ=Etc/EST
|
||||||
|
|
||||||
|
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
|
||||||
|
IMMICH_VERSION=release
|
||||||
|
|
||||||
|
# Connection secret for postgres. You should change it to a random password
|
||||||
|
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
|
||||||
|
DB_PASSWORD=postgres
|
||||||
|
|
||||||
|
# The values below this line do not need to be changed
|
||||||
|
###################################################################################
|
||||||
|
DB_USERNAME=postgres
|
||||||
|
DB_DATABASE_NAME=immich
|
||||||
|
|
||||||
|
# Should match the container_name fields in the compose.yaml
|
||||||
|
REDIS_HOSTNAME=immich_redis
|
||||||
|
DB_HOSTNAME=immich_postgres
|
||||||
3
active/container_immich/quadlets/README.md
Normal file
3
active/container_immich/quadlets/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Quadlets
|
||||||
|
|
||||||
|
Put your quadlets here.
|
||||||
13
active/container_immich/quadlets/database.container
Normal file
13
active/container_immich/quadlets/database.container
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=immich_postgres
|
||||||
|
Environment=POSTGRES_PASSWORD=postgres POSTGRES_USER=postgres POSTGRES_DB=immich POSTGRES_INITDB_ARGS=--data-checksums
|
||||||
|
Image=ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
|
||||||
|
Network=immich.network
|
||||||
|
ShmSize=128mb
|
||||||
|
Volume=/home/immich/postgres:/var/lib/postgresql/data:Z
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
@@ -0,0 +1,12 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=immich_machine_learning
|
||||||
|
EnvironmentFile=.env
|
||||||
|
Image=ghcr.io/immich-app/immich-machine-learning:release
|
||||||
|
Network=immich.network
|
||||||
|
Volume=/home/immich/model-cache:/cache:Z
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
17
active/container_immich/quadlets/immich-server.container
Normal file
17
active/container_immich/quadlets/immich-server.container
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
[Unit]
|
||||||
|
Requires=redis.service database.service
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
ContainerName=immich_server
|
||||||
|
EnvironmentFile=.env
|
||||||
|
Image=ghcr.io/immich-app/immich-server:v2.3.1
|
||||||
|
Network=immich.network
|
||||||
|
PublishPort=2283:2283
|
||||||
|
Volume=/home/immich/library:/data:Z
|
||||||
|
Volume=/etc/localtime:/etc/localtime:ro
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
5
active/container_immich/quadlets/immich.network
Normal file
5
active/container_immich/quadlets/immich.network
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[Network]
|
||||||
|
IPv6=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
11
active/container_immich/quadlets/redis.container
Normal file
11
active/container_immich/quadlets/redis.container
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
[Container]
|
||||||
|
ContainerName=immich_redis
|
||||||
|
HealthCmd=redis-cli ping || exit 1
|
||||||
|
Image=docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
|
||||||
|
Network=immich.network
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
74
active/container_immich/release-compose.yaml
Normal file
74
active/container_immich/release-compose.yaml
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
#
|
||||||
|
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
|
||||||
|
#
|
||||||
|
# Make sure to use the docker-compose.yml of the current release:
|
||||||
|
#
|
||||||
|
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
||||||
|
#
|
||||||
|
# The compose file on main may not be compatible with the latest release.
|
||||||
|
|
||||||
|
name: immich
|
||||||
|
|
||||||
|
services:
|
||||||
|
immich-server:
|
||||||
|
container_name: immich_server
|
||||||
|
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
|
||||||
|
# extends:
|
||||||
|
# file: hwaccel.transcoding.yml
|
||||||
|
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
|
||||||
|
volumes:
|
||||||
|
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
|
||||||
|
- ${UPLOAD_LOCATION}:/data
|
||||||
|
- /etc/localtime:/etc/localtime:ro
|
||||||
|
env_file:
|
||||||
|
- /root/.env
|
||||||
|
ports:
|
||||||
|
- '2283:2283'
|
||||||
|
depends_on:
|
||||||
|
- redis
|
||||||
|
- database
|
||||||
|
restart: always
|
||||||
|
healthcheck:
|
||||||
|
disable: false
|
||||||
|
|
||||||
|
immich-machine-learning:
|
||||||
|
container_name: immich_machine_learning
|
||||||
|
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
|
||||||
|
# Example tag: ${IMMICH_VERSION:-release}-cuda
|
||||||
|
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
|
||||||
|
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
|
||||||
|
# file: hwaccel.ml.yml
|
||||||
|
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
|
||||||
|
volumes:
|
||||||
|
- model-cache:/cache
|
||||||
|
env_file:
|
||||||
|
- /root/.env
|
||||||
|
restart: always
|
||||||
|
healthcheck:
|
||||||
|
disable: false
|
||||||
|
|
||||||
|
redis:
|
||||||
|
container_name: immich_redis
|
||||||
|
image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
|
||||||
|
healthcheck:
|
||||||
|
test: redis-cli ping || exit 1
|
||||||
|
restart: always
|
||||||
|
|
||||||
|
database:
|
||||||
|
container_name: immich_postgres
|
||||||
|
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
|
||||||
|
environment:
|
||||||
|
POSTGRES_PASSWORD: ${DB_PASSWORD}
|
||||||
|
POSTGRES_USER: ${DB_USERNAME}
|
||||||
|
POSTGRES_DB: ${DB_DATABASE_NAME}
|
||||||
|
POSTGRES_INITDB_ARGS: '--data-checksums'
|
||||||
|
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
|
||||||
|
# DB_STORAGE_TYPE: 'HDD'
|
||||||
|
volumes:
|
||||||
|
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
|
||||||
|
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
|
||||||
|
shm_size: 128mb
|
||||||
|
restart: always
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
model-cache:
|
||||||
22
active/container_immich/release-env
Normal file
22
active/container_immich/release-env
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
|
||||||
|
|
||||||
|
# The location where your uploaded files are stored
|
||||||
|
UPLOAD_LOCATION=/srv/immich-data/library
|
||||||
|
|
||||||
|
# The location where your database files are stored. Network shares are not supported for the database
|
||||||
|
DB_DATA_LOCATION=/srv/immich-db/postgres
|
||||||
|
|
||||||
|
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
|
||||||
|
# TZ=Etc/UTC
|
||||||
|
|
||||||
|
# The Immich version to use. You can pin this to a specific version like "v2.1.0"
|
||||||
|
IMMICH_VERSION=v2
|
||||||
|
|
||||||
|
# Connection secret for postgres. You should change it to a random password
|
||||||
|
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
|
||||||
|
DB_PASSWORD=postgres
|
||||||
|
|
||||||
|
# The values below this line do not need to be changed
|
||||||
|
###################################################################################
|
||||||
|
DB_USERNAME=postgres
|
||||||
|
DB_DATABASE_NAME=immich
|
||||||
42
active/container_iperf3/iperf3.md
Normal file
42
active/container_iperf3/iperf3.md
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# Iperf3 Server on Podman
|
||||||
|
|
||||||
|
- [Iperf3 Server on Podman](#iperf3-server-on-podman)
|
||||||
|
- [Install](#install)
|
||||||
|
- [Client](#client)
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
Create a new ipv6 network
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# As root
|
||||||
|
podman network create iperf3 --ipv6
|
||||||
|
```
|
||||||
|
|
||||||
|
Create the iperf3 container
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# As root
|
||||||
|
podman run \
|
||||||
|
--name iperf3 \
|
||||||
|
-d \
|
||||||
|
-p 5201:5201 \
|
||||||
|
--restart=always \
|
||||||
|
--network=iperf3 \
|
||||||
|
docker.io/networkstatic/iperf3:latest -s
|
||||||
|
```
|
||||||
|
|
||||||
|
Make sure you have the `podman-restart` service enabled and running
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl enable --now podman-restart
|
||||||
|
```
|
||||||
|
|
||||||
|
## Client
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run \
|
||||||
|
--rm \
|
||||||
|
--network=iperf3 \
|
||||||
|
docker.io/networkstatic/iperf3:latest -c 3dserver.reeselink.com -P 5
|
||||||
|
```
|
||||||
17
active/container_jellyfin/jellyfin-compose.yaml
Normal file
17
active/container_jellyfin/jellyfin-compose.yaml
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
services:
|
||||||
|
jellyfin:
|
||||||
|
image: jellyfin/jellyfin
|
||||||
|
container_name: jellyfin
|
||||||
|
ports:
|
||||||
|
- 8096:8096/tcp
|
||||||
|
- 7359:7359/udp
|
||||||
|
volumes:
|
||||||
|
- /srv/jellyfin/config:/config
|
||||||
|
- /srv/jellyfin/cache:/cache
|
||||||
|
- type: bind
|
||||||
|
source: /mnt/media
|
||||||
|
target: /media
|
||||||
|
read_only: true
|
||||||
|
restart: 'always'
|
||||||
|
environment:
|
||||||
|
- JELLYFIN_PublishedServerUrl=https://jellyfin.reeseapps.com
|
||||||
74
active/container_jellyfin/jellyfin.md
Normal file
74
active/container_jellyfin/jellyfin.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# Jellyfin
|
||||||
|
|
||||||
|
- [Jellyfin](#jellyfin)
|
||||||
|
- [Docker Install](#docker-install)
|
||||||
|
- [Rootless Podman Install](#rootless-podman-install)
|
||||||
|
- [Upgrade](#upgrade)
|
||||||
|
- [Mounting Media Directory](#mounting-media-directory)
|
||||||
|
|
||||||
|
They have podman rootless instructions!
|
||||||
|
|
||||||
|
<https://jellyfin.org/docs/general/installation/container/#managing-via-systemd>
|
||||||
|
|
||||||
|
## Docker Install
|
||||||
|
|
||||||
|
<https://jellyfin.org/docs/general/installation/container>
|
||||||
|
|
||||||
|
## Rootless Podman Install
|
||||||
|
|
||||||
|
1. Create the jellyfin user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
useradd jellyfin
|
||||||
|
loginctl enable-linger $(id -u jellyfin)
|
||||||
|
systemctl --user --machine=jellyfin@.host enable podman-restart
|
||||||
|
systemctl --user --machine=jellyfin@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/jellyfin/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Update the jellyfin record in Caddy.
|
||||||
|
3. Open port 8096 in the firewall.
|
||||||
|
4. Copy the files to the server and start the service
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=3dserver
|
||||||
|
scp -r active/container_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
||||||
|
|
||||||
|
ssh $PODMAN_SERVER
|
||||||
|
machinectl shell jellyfin@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart jellyfin
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upgrade
|
||||||
|
|
||||||
|
1. Check [the blog](https://jellyfin.org/posts) for breaking changes
|
||||||
|
2. Update the `jellyfin.container` with the new image version
|
||||||
|
3. Update quadlets and restart the service
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Upload quadlets and restart
|
||||||
|
export PODMAN_SERVER=3dserver
|
||||||
|
scp -r active/container_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
|
||||||
|
|
||||||
|
ssh $PODMAN_SERVER
|
||||||
|
machinectl shell jellyfin@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart jellyfin
|
||||||
|
```
|
||||||
|
|
||||||
|
## Mounting Media Directory
|
||||||
|
|
||||||
|
Update /etc/fstab with the smb disk details.
|
||||||
|
|
||||||
|
Note:
|
||||||
|
|
||||||
|
- `x-systemd.automount` which only mounts the device when it's accessed.
|
||||||
|
- `x-systemd.mount-timeout=30` allows a 30 second timeout
|
||||||
|
- `_netdev` ensures the device won't be mounted until after the network is available
|
||||||
|
|
||||||
|
```conf
|
||||||
|
UUID=... /btrfs/some-name btrfs subvolid=5,compress=zstd:1,x-systemd.automount,x-systemd.mount-timeout=30,_netdev 0 0
|
||||||
|
```
|
||||||
18
active/container_jellyfin/quadlets/jellyfin.container
Normal file
18
active/container_jellyfin/quadlets/jellyfin.container
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
[Container]
|
||||||
|
Image=docker.io/jellyfin/jellyfin:10.11.3
|
||||||
|
AutoUpdate=registry
|
||||||
|
PublishPort=8096:8096/tcp
|
||||||
|
UserNS=keep-id
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
Volume=/home/jellyfin/jellyfin-config:/config:Z
|
||||||
|
Volume=/home/jellyfin/jellyfin-cache:/cache:Z
|
||||||
|
Volume=/var/media:/media:Z
|
||||||
|
Network=jellyfin.network
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
# Inform systemd of additional exit status
|
||||||
|
SuccessExitStatus=0 143
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
# Start by default on boot
|
||||||
|
WantedBy=default.target
|
||||||
8
active/container_jellyfin/quadlets/jellyfin.network
Normal file
8
active/container_jellyfin/quadlets/jellyfin.network
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=Jellyfin
|
||||||
|
|
||||||
|
[Network]
|
||||||
|
IPv6=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
3
active/container_keycloak/compose/README.md
Normal file
3
active/container_keycloak/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
19
active/container_keycloak/compose/compose.yaml
Normal file
19
active/container_keycloak/compose/compose.yaml
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
services:
|
||||||
|
keycloak:
|
||||||
|
container_name: keycloak
|
||||||
|
restart: always
|
||||||
|
image: quay.io/keycloak/keycloak:26.4.5
|
||||||
|
ports:
|
||||||
|
- "9443:443"
|
||||||
|
- "8443:8443"
|
||||||
|
volumes:
|
||||||
|
- /home/foobar/data:/var/app/data
|
||||||
|
security_opt:
|
||||||
|
- label=disable
|
||||||
|
userns_mode: keep-id
|
||||||
|
command:
|
||||||
|
- "start"
|
||||||
|
- "--hostname"
|
||||||
|
- "https://keycloak.reeseapps.com"
|
||||||
|
- "--hostname-admin"
|
||||||
|
- "https://keycloak.reeselink.com:8443"
|
||||||
186
active/container_keycloak/keycloak.md
Normal file
186
active/container_keycloak/keycloak.md
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
# Podman keycloak
|
||||||
|
|
||||||
|
- [Podman keycloak](#podman-keycloak)
|
||||||
|
- [Setup keycloak Project](#setup-keycloak-project)
|
||||||
|
- [Install Keycloak with Docker](#install-keycloak-with-docker)
|
||||||
|
- [Install Keycloak with Podman](#install-keycloak-with-podman)
|
||||||
|
- [Create the keycloak user](#create-the-keycloak-user)
|
||||||
|
- [Write the keycloak compose spec](#write-the-keycloak-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert keycloak compose spec to quadlets](#convert-keycloak-compose-spec-to-quadlets)
|
||||||
|
- [Create any container-mounted directories](#create-any-container-mounted-directories)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose keycloak](#expose-keycloak)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup keycloak](#backup-keycloak)
|
||||||
|
- [Upgrade keycloak](#upgrade-keycloak)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup keycloak Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_keycloak
|
||||||
|
- [ ] Find and replace keycloak with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install Keycloak with Docker
|
||||||
|
|
||||||
|
<https://www.keycloak.org/getting-started/getting-started-docker>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Test in dev mode
|
||||||
|
docker run -p 8080:8080 -e KC_BOOTSTRAP_ADMIN_USERNAME=admin -e KC_BOOTSTRAP_ADMIN_PASSWORD=admin quay.io/keycloak/keycloak:26.4.7 start-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
## Install Keycloak with Podman
|
||||||
|
|
||||||
|
### Create the keycloak user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd keycloak
|
||||||
|
loginctl enable-linger $(id -u keycloak)
|
||||||
|
systemctl --user --machine=keycloak@.host enable podman-restart
|
||||||
|
systemctl --user --machine=keycloak@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/keycloak/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the keycloak compose spec
|
||||||
|
|
||||||
|
<https://www.keycloak.org/getting-started/getting-started-podman>
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Based on the example
|
||||||
|
podman run \
|
||||||
|
-p 127.0.0.1:8080:8080 \
|
||||||
|
-e KC_BOOTSTRAP_ADMIN_USERNAME=admin \
|
||||||
|
-e KC_BOOTSTRAP_ADMIN_PASSWORD=admin \
|
||||||
|
quay.io/keycloak/keycloak:26.4.5 start-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/keycloak/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert keycloak compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_keycloak/:/compose \
|
||||||
|
-v $(pwd)/active/container_keycloak/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r active/container_keycloak/quadlets/. $PODMAN_SERVER:/home/keycloak/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER chown -R keycloak:keycloak /home/keycloak/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create any container-mounted directories
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell keycloak@
|
||||||
|
podman unshare
|
||||||
|
mkdir some_volume
|
||||||
|
# Chown to the namespaced user with UID 1000
|
||||||
|
# This will be some really obscure UID outside the namespace
|
||||||
|
# This will also solve most permission denied errors
|
||||||
|
chown -R 1000:1000 some_volume
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
machinectl shell keycloak@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart keycloak
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose keycloak
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup keycloak
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade keycloak
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-keycloak-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER:/home/keycloak/.config/containers/systemd/
|
||||||
|
ssh keycloak systemctl --user daemon-reload
|
||||||
|
ssh keycloak systemctl --user restart keycloak
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user keycloak is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel keycloak
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
3
active/container_keycloak/quadlets/README.md
Normal file
3
active/container_keycloak/quadlets/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Quadlets
|
||||||
|
|
||||||
|
Put your quadlets here.
|
||||||
3
active/container_litellm/compose/README.md
Normal file
3
active/container_litellm/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
37
active/container_litellm/compose/compose.yaml
Normal file
37
active/container_litellm/compose/compose.yaml
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
services:
|
||||||
|
litellm:
|
||||||
|
image: docker.litellm.ai/berriai/litellm:main-latest
|
||||||
|
ports:
|
||||||
|
- "4000:4000"
|
||||||
|
env_file: /home/ai/litellm.env
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: "postgresql://llmproxy:dbpassword9090@host.containers.internal:5432/litellm"
|
||||||
|
STORE_MODEL_IN_DB: "True"
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
- litellm-db # Indicates that this service depends on the 'litellm-db' service, ensuring 'litellm-db' starts first
|
||||||
|
healthcheck: # Defines the health check configuration for the container
|
||||||
|
test:
|
||||||
|
- CMD-SHELL
|
||||||
|
- python3 -c "import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')" # Command to execute for health check
|
||||||
|
interval: 30s # Perform health check every 30 seconds
|
||||||
|
timeout: 10s # Health check command times out after 10 seconds
|
||||||
|
retries: 3 # Retry up to 3 times if health check fails
|
||||||
|
start_period: 40s # Wait 40 seconds after container start before beginning health checks
|
||||||
|
|
||||||
|
litellm-db:
|
||||||
|
image: docker.io/postgres:16
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
POSTGRES_DB: litellm
|
||||||
|
POSTGRES_USER: llmproxy
|
||||||
|
POSTGRES_PASSWORD: dbpassword9090
|
||||||
|
ports:
|
||||||
|
- "5432:5432"
|
||||||
|
volumes:
|
||||||
|
- litellm_postgres_data:/var/lib/postgresql/data:z
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -d litellm -U llmproxy"]
|
||||||
|
interval: 1s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
67
active/container_litellm/config.yaml
Normal file
67
active/container_litellm/config.yaml
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# General settings
|
||||||
|
|
||||||
|
general_settings:
|
||||||
|
request_timeout: 600
|
||||||
|
|
||||||
|
# Models
|
||||||
|
model_list:
|
||||||
|
# Qwen3.5-35B variants
|
||||||
|
- model_name: qwen3.5-35b-think-general
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 1.0
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: true
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-think-code
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 0.6
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 0.0
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: true
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-instruct-general
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 0.7
|
||||||
|
top_p: 0.8
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: false
|
||||||
|
|
||||||
|
- model_name: qwen3.5-35b-instruct-reasoning
|
||||||
|
litellm_params:
|
||||||
|
model: openai/qwen3.5-35b-a3b
|
||||||
|
api_base: https://llama-cpp.reeselink.com
|
||||||
|
api_key: none
|
||||||
|
temperature: 1.0
|
||||||
|
top_p: 0.95
|
||||||
|
presence_penalty: 1.5
|
||||||
|
extra_body:
|
||||||
|
top_k: 20
|
||||||
|
min_p: 0.0
|
||||||
|
repetition_penalty: 1.0
|
||||||
|
chat_template_kwargs:
|
||||||
|
enable_thinking: false
|
||||||
233
active/container_litellm/litellm.md
Normal file
233
active/container_litellm/litellm.md
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
# Podman litellm
|
||||||
|
|
||||||
|
- [Podman litellm](#podman-litellm)
|
||||||
|
- [Setup litellm Project](#setup-litellm-project)
|
||||||
|
- [Install litellm](#install-litellm)
|
||||||
|
- [Create the ai user](#create-the-ai-user)
|
||||||
|
- [Write the litellm compose spec](#write-the-litellm-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert litellm compose spec to quadlets](#convert-litellm-compose-spec-to-quadlets)
|
||||||
|
- [Create the litellm.env file](#create-the-litellmenv-file)
|
||||||
|
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
|
||||||
|
- [Expose litellm](#expose-litellm)
|
||||||
|
- [Using LiteLLM](#using-litellm)
|
||||||
|
- [Adding Models](#adding-models)
|
||||||
|
- [Testing Models](#testing-models)
|
||||||
|
- [Backup litellm](#backup-litellm)
|
||||||
|
- [Upgrade litellm](#upgrade-litellm)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Uninstall](#uninstall)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup litellm Project
|
||||||
|
|
||||||
|
- [ ] Copy and rename this folder to active/container_litellm
|
||||||
|
- [ ] Find and replace litellm with the name of the service.
|
||||||
|
- [ ] Create the rootless user to run the podman containers
|
||||||
|
- [ ] Write the compose.yaml spec for your service
|
||||||
|
- [ ] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [ ] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install litellm
|
||||||
|
|
||||||
|
### Create the ai user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd ai
|
||||||
|
loginctl enable-linger $(id -u ai)
|
||||||
|
systemctl --user --machine=ai@.host enable podman-restart
|
||||||
|
systemctl --user --machine=ai@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/ai/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the litellm compose spec
|
||||||
|
|
||||||
|
See the [docker run command here](https://docs.litellm.ai/docs/proxy/docker_quick_start#32-start-proxy)
|
||||||
|
|
||||||
|
Edit the compose.yaml at active/container_litellm/compose/compose.yaml
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `/home/ai/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert litellm compose spec to quadlets
|
||||||
|
|
||||||
|
Run the following to convert a compose.yaml into the various `.container` files for systemd:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_litellm/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_litellm/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
export PODMAN_SERVER=ai
|
||||||
|
scp -r active/container_litellm/quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create the litellm.env file
|
||||||
|
|
||||||
|
Should look something like:
|
||||||
|
|
||||||
|
```env
|
||||||
|
LITELLM_MASTER_KEY="random-string"
|
||||||
|
LITELLM_SALT_KEY="random-string"
|
||||||
|
|
||||||
|
UI_USERNAME="admin"
|
||||||
|
UI_PASSWORD="random-string"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then copy it to the server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=ai
|
||||||
|
scp active/container_litellm/litellm.env $PODMAN_SERVER:/home/ai/litellm.env
|
||||||
|
scp active/container_litellm/config.yaml $PODMAN_SERVER:/home/ai/litellm_config.yaml
|
||||||
|
ssh $PODMAN_SERVER chown ai:ai /home/ai/litellm.env /home/ai/litellm_config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
### Start and enable your systemd quadlet
|
||||||
|
|
||||||
|
SSH into your podman server as root:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh ai
|
||||||
|
machinectl shell ai@
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart litellm
|
||||||
|
journalctl --user -u litellm -f
|
||||||
|
# Enable auto-update service which will pull new container images automatically every day
|
||||||
|
systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose litellm
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
## Using LiteLLM
|
||||||
|
|
||||||
|
### Adding Models
|
||||||
|
|
||||||
|
```json
|
||||||
|
// qwen3.5-35b-a3b-thinking
|
||||||
|
{
|
||||||
|
"temperature": 1,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-coding
|
||||||
|
{
|
||||||
|
"temperature": 0.6,
|
||||||
|
"top_p": 0.95,
|
||||||
|
"presence_penalty": 0,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// qwen3.5-35b-a3b-instruct
|
||||||
|
{
|
||||||
|
"temperature": 0.7,
|
||||||
|
"top_p": 0.8,
|
||||||
|
"presence_penalty": 1.5,
|
||||||
|
"extra_body": {
|
||||||
|
"top_k": 20,
|
||||||
|
"min_p": 0,
|
||||||
|
"repetition_penalty": 1,
|
||||||
|
"chat_template_kwargs": {
|
||||||
|
"enable_thinking": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing Models
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List models
|
||||||
|
curl -L -X GET 'https://aipi.reeseapps.com/v1/models' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234'
|
||||||
|
|
||||||
|
curl -L -X POST 'https://aipi.reeseapps.com/v1/chat/completions' \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-H 'Authorization: Bearer sk-1234' \
|
||||||
|
# 👈 REPLACE "gpt-4o-mini" below with the 'public model name' for any db-model
-d '{
|
||||||
|
"model": "gpt-4o-mini",
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"content": "Hey, how's it going",
|
||||||
|
"role": "user"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup litellm
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade litellm
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-litellm-compose-spec-to-quadlets) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export PODMAN_SERVER=ai
|
||||||
|
scp -r quadlets/. $PODMAN_SERVER:/home/ai/.config/containers/systemd/
|
||||||
|
ssh $PODMAN_SERVER systemctl --user daemon-reload
|
||||||
|
ssh $PODMAN_SERVER systemctl --user restart litellm
|
||||||
|
```
|
||||||
|
|
||||||
|
## Uninstall
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop the user's services
|
||||||
|
systemctl --user disable podman-restart
|
||||||
|
podman container stop --all
|
||||||
|
systemctl --user disable --now podman.socket
|
||||||
|
systemctl --user disable --now podman-auto-update.timer
|
||||||
|
|
||||||
|
# Delete the user (this won't delete their home directory)
|
||||||
|
# userdel might spit out an error like:
|
||||||
|
# userdel: user ai is currently used by process 591255
|
||||||
|
# kill those processes and try again
|
||||||
|
userdel ai
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
15
active/container_litellm/quadlets/litellm-db.container
Normal file
15
active/container_litellm/quadlets/litellm-db.container
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[Container]
|
||||||
|
Environment=POSTGRES_DB=litellm POSTGRES_USER=llmproxy POSTGRES_PASSWORD=dbpassword9090
|
||||||
|
HealthCmd='pg_isready -d litellm -U llmproxy'
|
||||||
|
HealthInterval=1s
|
||||||
|
HealthRetries=10
|
||||||
|
HealthTimeout=5s
|
||||||
|
Image=docker.io/postgres:16
|
||||||
|
PublishPort=5432:5432
|
||||||
|
Volume=litellm_postgres_data:/var/lib/postgresql/data:z
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
19
active/container_litellm/quadlets/litellm.container
Normal file
19
active/container_litellm/quadlets/litellm.container
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
[Unit]
|
||||||
|
Requires=litellm-db.service
|
||||||
|
|
||||||
|
[Container]
|
||||||
|
Environment=DATABASE_URL=postgresql://llmproxy:dbpassword9090@host.containers.internal:5432/litellm STORE_MODEL_IN_DB=True
|
||||||
|
EnvironmentFile=/home/ai/litellm.env
|
||||||
|
HealthCmd="python3 -c \"import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')\""
|
||||||
|
HealthInterval=30s
|
||||||
|
HealthRetries=3
|
||||||
|
HealthStartPeriod=40s
|
||||||
|
HealthTimeout=10s
|
||||||
|
Image=docker.litellm.ai/berriai/litellm:main-latest
|
||||||
|
PublishPort=4000:4000
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
- [Local AI with Anything LLM](#local-ai-with-anything-llm)
|
||||||
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
- [Useful links I keep losing](#useful-links-i-keep-losing)
|
||||||
|
- [Podman](#podman)
|
||||||
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
- [Running Local AI on Ubuntu 24.04 with Nvidia GPU](#running-local-ai-on-ubuntu-2404-with-nvidia-gpu)
|
||||||
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
- [Running Local AI on Arch with AMD GPU](#running-local-ai-on-arch-with-amd-gpu)
|
||||||
- [Running Anything LLM](#running-anything-llm)
|
- [Running Anything LLM](#running-anything-llm)
|
||||||
@@ -32,6 +33,12 @@
|
|||||||
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
- [Example model config files from gallery](https://github.com/mudler/LocalAI/tree/master/gallery)
|
||||||
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
- [List of all available models](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml)
|
||||||
|
|
||||||
|
## Podman
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -ti --name local-ai -p 8081:8080 --device=/dev/kfd --device=/dev/dri --group-add=video --replace localai/localai:latest-gpu-vulkan
|
||||||
|
```
|
||||||
|
|
||||||
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
## Running Local AI on Ubuntu 24.04 with Nvidia GPU
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -124,7 +131,7 @@ pipx install "huggingface_hub[cli]"
|
|||||||
podman network create --ipv6 --label local-ai local-ai
|
podman network create --ipv6 --label local-ai local-ai
|
||||||
|
|
||||||
# You might want to mount an external drive here.
|
# You might want to mount an external drive here.
|
||||||
export MODEL_DIR=/models
|
export MODEL_DIR=/srv/models
|
||||||
mkdir -p $MODEL_DIR
|
mkdir -p $MODEL_DIR
|
||||||
|
|
||||||
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
# LOCALAI_SINGLE_ACTIVE_BACKEND will unload the previous model before loading the next one
|
||||||
@@ -136,14 +143,13 @@ mkdir -p $MODEL_DIR
|
|||||||
podman run \
|
podman run \
|
||||||
-d \
|
-d \
|
||||||
-p 8080:8080 \
|
-p 8080:8080 \
|
||||||
-e LOCALAI_API_KEY=$(cat ~/.localai/token) \
|
|
||||||
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
-e LOCALAI_SINGLE_ACTIVE_BACKEND=true \
|
||||||
--device /dev/dri \
|
--device /dev/dri \
|
||||||
--device /dev/kfd \
|
--device /dev/kfd \
|
||||||
--name local-ai \
|
--name local-ai \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
|
|
||||||
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
# The second (8081) will be our frontend. We'll protect it with basic auth.
|
||||||
@@ -153,9 +159,9 @@ podman run \
|
|||||||
-d \
|
-d \
|
||||||
-p 8081:8080 \
|
-p 8081:8080 \
|
||||||
--name local-ai-webui \
|
--name local-ai-webui \
|
||||||
--network local-ai \
|
--replace \
|
||||||
-v $MODEL_DIR:/build/models \
|
-v $MODEL_DIR:/build/models:z \
|
||||||
-v localai-tmp:/tmp/generated \
|
-v localai-tmp:/tmp/generated:z \
|
||||||
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
quay.io/go-skynet/local-ai:master-hipblas-ffmpeg
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -194,7 +200,7 @@ letsencrypt certificate generated and in the place nginx expects it.
|
|||||||
Before we can use certbot we need aws credentials. Note this will be different if you
|
Before we can use certbot we need aws credentials. Note this will be different if you
|
||||||
use a different DNS provider.
|
use a different DNS provider.
|
||||||
|
|
||||||
See [generating AWS credentials](cloud/graduated/aws_iam/README.md)
|
See [generating AWS credentials](active/cloud_aws_iam/README.md)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
|
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
|
||||||
3
active/container_matrix/compose/README.md
Normal file
3
active/container_matrix/compose/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Compose
|
||||||
|
|
||||||
|
Put your compose.yaml here.
|
||||||
28
active/container_matrix/compose/compose.yaml
Normal file
28
active/container_matrix/compose/compose.yaml
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# tuwunel
|
||||||
|
|
||||||
|
services:
|
||||||
|
matrix:
|
||||||
|
image: ghcr.io/matrix-construct/tuwunel:latest
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "8448:6167"
|
||||||
|
volumes:
|
||||||
|
- /home/matrix/tuwunel-db:/var/lib/tuwunel
|
||||||
|
#- ./tuwunel.toml:/etc/tuwunel.toml
|
||||||
|
security_opt:
|
||||||
|
- "label=disable"
|
||||||
|
userns_mode: keep-id
|
||||||
|
environment:
|
||||||
|
TUWUNEL_SERVER_NAME: matrix.reeseapps.com # EDIT THIS
|
||||||
|
TUWUNEL_DATABASE_PATH: /var/lib/tuwunel
|
||||||
|
TUWUNEL_PORT: 6167
|
||||||
|
TUWUNEL_MAX_REQUEST_SIZE: 200000000 # in bytes, ~200 MB
|
||||||
|
TUWUNEL_ALLOW_REGISTRATION: 'false'
|
||||||
|
# TUWUNEL_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
|
||||||
|
#TUWUNEL_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
|
||||||
|
TUWUNEL_ALLOW_FEDERATION: 'true'
|
||||||
|
TUWUNEL_ALLOW_CHECK_FOR_UPDATES: 'true'
|
||||||
|
TUWUNEL_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
|
#TUWUNEL_LOG: warn,state_res=warn
|
||||||
|
TUWUNEL_ADDRESS: 0.0.0.0
|
||||||
|
#TUWUNEL_CONFIG: '/etc/tuwunel.toml' # Uncomment if you mapped config toml above
|
||||||
145
active/container_matrix/matrix.md
Normal file
145
active/container_matrix/matrix.md
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
# Podman matrix
|
||||||
|
|
||||||
|
- [Podman matrix](#podman-matrix)
|
||||||
|
- [Setup matrix Project](#setup-matrix-project)
|
||||||
|
- [Install matrix](#install-matrix)
|
||||||
|
- [Create the matrix user](#create-the-matrix-user)
|
||||||
|
- [Write the matrix compose spec](#write-the-matrix-compose-spec)
|
||||||
|
- [A Note on Volumes](#a-note-on-volumes)
|
||||||
|
- [Convert matrix compose spec to quadlets](#convert-matrix-compose-spec-to-quadlets)
|
||||||
|
- [Setup matrix users](#setup-matrix-users)
|
||||||
|
- [Expose matrix](#expose-matrix)
|
||||||
|
- [firewalld](#firewalld)
|
||||||
|
- [Backup matrix](#backup-matrix)
|
||||||
|
- [Upgrade matrix](#upgrade-matrix)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Notes](#notes)
|
||||||
|
- [SELinux](#selinux)
|
||||||
|
|
||||||
|
## Setup matrix Project
|
||||||
|
|
||||||
|
- [x] Copy and rename this folder to active/container_matrix
|
||||||
|
- [x] Find and replace matrix with the name of the service.
|
||||||
|
- [x] Create the rootless user to run the podman containers
|
||||||
|
- [x] Write the compose.yaml spec for your service
|
||||||
|
- [x] Convert the compose.yaml spec to a quadlet
|
||||||
|
- [x] Install the quadlet on the podman server
|
||||||
|
- [ ] Expose the quadlet service
|
||||||
|
- [ ] Install a backup service and timer
|
||||||
|
|
||||||
|
## Install matrix
|
||||||
|
|
||||||
|
### Create the matrix user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd matrix
|
||||||
|
loginctl enable-linger $(id -u matrix)
|
||||||
|
systemctl --user --machine=matrix@.host enable podman-restart
|
||||||
|
systemctl --user --machine=matrix@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/matrix/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Write the matrix compose spec
|
||||||
|
|
||||||
|
Edit the compose.yaml at active/container_matrix/compose/compose.yaml
|
||||||
|
|
||||||
|
#### A Note on Volumes
|
||||||
|
|
||||||
|
Named volumes are stored at `~/.local/share/containers/storage/volumes/`.
|
||||||
|
|
||||||
|
### Convert matrix compose spec to quadlets
|
||||||
|
|
||||||
|
On your local machine:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_matrix/compose:/compose \
|
||||||
|
-v $(pwd)/active/container_matrix/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
scp -r active/container_matrix/quadlets/. matrix:~/.config/containers/systemd/
|
||||||
|
|
||||||
|
# Copy the compose files to the server
|
||||||
|
scp -r active/container_matrix/compose/. matrix:~/.config/
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh matrix systemctl --user daemon-reload
|
||||||
|
ssh matrix systemctl --user restart matrix
|
||||||
|
# Enables auto-update service which will pull new container images automatically every day
|
||||||
|
ssh matrix systemctl --user enable --now podman-auto-update.timer
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setup matrix users
|
||||||
|
|
||||||
|
```bash
|
||||||
|
podman run \
|
||||||
|
-v /home/matrix/tuwunel-db:/var/lib/tuwunel:Z \
|
||||||
|
-e TUWUNEL_SERVER_NAME=matrix.reeseapps.com \
|
||||||
|
-e TUWUNEL_DATABASE_PATH=/var/lib/tuwunel \
|
||||||
|
--userns=keep-id \
|
||||||
|
--
|
||||||
|
-it \
|
||||||
|
--rm \
|
||||||
|
ghcr.io/matrix-construct/tuwunel:latest \
|
||||||
|
--execute "users create_user ducoterra"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Expose matrix
|
||||||
|
|
||||||
|
1. If you need a domain, follow the [DDNS instructions](/active/container_ddns/ddns.md#install-a-new-ddns-service)
|
||||||
|
2. For a web service, follow the [Caddy instructions](/active/container_caddy/caddy.md#adding-a-new-caddy-record)
|
||||||
|
3. Finally, follow your OS's guide for opening ports via its firewall service.
|
||||||
|
|
||||||
|
#### firewalld
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# command to get current active zone and default zone
|
||||||
|
firewall-cmd --get-active-zones
|
||||||
|
firewall-cmd --get-default-zone
|
||||||
|
|
||||||
|
# command to open 443 on tcp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
|
||||||
|
|
||||||
|
# command to open 80 and 443 on tcp and udp
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
|
||||||
|
|
||||||
|
# command to list available services and then open http and https
|
||||||
|
firewall-cmd --get-services
|
||||||
|
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Backup matrix
|
||||||
|
|
||||||
|
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
|
||||||
|
|
||||||
|
## Upgrade matrix
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
Upgrades should be a repeat of [writing the compose spec](#convert-matrix-compose-spec-to-quadlets) and [installing the quadlets](#convert-matrix-compose-spec-to-quadlets)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp -r quadlets/. matrix:~/.config/containers/systemd/
|
||||||
|
ssh matrix systemctl --user daemon-reload
|
||||||
|
ssh matrix systemctl --user restart matrix
|
||||||
|
```
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
### SELinux
|
||||||
|
|
||||||
|
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
|
||||||
|
|
||||||
|
:z allows a container to share a mounted volume with all other containers.
|
||||||
|
|
||||||
|
:Z allows a container to reserve a mounted volume and prevents any other container from accessing.
|
||||||
3
active/container_matrix/quadlets/README.md
Normal file
3
active/container_matrix/quadlets/README.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
# Quadlets
|
||||||
|
|
||||||
|
Put your quadlets here.
|
||||||
13
active/container_matrix/quadlets/matrix.container
Normal file
13
active/container_matrix/quadlets/matrix.container
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[Container]
|
||||||
|
Environment=TUWUNEL_SERVER_NAME=matrix.reeseapps.com TUWUNEL_DATABASE_PATH=/var/lib/tuwunel TUWUNEL_PORT=6167 TUWUNEL_MAX_REQUEST_SIZE=200000000 TUWUNEL_ALLOW_REGISTRATION=false TUWUNEL_ALLOW_FEDERATION=true TUWUNEL_ALLOW_CHECK_FOR_UPDATES=true TUWUNEL_TRUSTED_SERVERS=["matrix.org"] TUWUNEL_ADDRESS=0.0.0.0
|
||||||
|
Image=ghcr.io/matrix-construct/tuwunel:latest
|
||||||
|
PublishPort=8448:6167
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
UserNS=keep-id
|
||||||
|
Volume=/home/matrix/tuwunel-db:/var/lib/tuwunel
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
25
active/container_minecraft/minecraft-compose.yaml
Normal file
25
active/container_minecraft/minecraft-compose.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
services:
|
||||||
|
testcraft:
|
||||||
|
image: gitea.reeseapps.com/services/minecraft:latest
|
||||||
|
stdin_open: true
|
||||||
|
tty: true
|
||||||
|
volumes:
|
||||||
|
- /srv/minecraft/testcraft:/mc_data
|
||||||
|
ports:
|
||||||
|
- 25565:25565
|
||||||
|
environment:
|
||||||
|
- MAX_RAM=4
|
||||||
|
- MIN_RAM=1
|
||||||
|
restart: always
|
||||||
|
nimcraft:
|
||||||
|
image: gitea.reeseapps.com/services/minecraft:latest
|
||||||
|
stdin_open: true
|
||||||
|
tty: true
|
||||||
|
volumes:
|
||||||
|
- /srv/minecraft/nimcraft:/mc_data
|
||||||
|
ports:
|
||||||
|
- 25566:25565
|
||||||
|
environment:
|
||||||
|
- MAX_RAM=4
|
||||||
|
- MIN_RAM=1
|
||||||
|
restart: always
|
||||||
121
active/container_minecraft/minecraft.md
Normal file
121
active/container_minecraft/minecraft.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
# Podman minecraft
|
||||||
|
|
||||||
|
- [Podman minecraft](#podman-minecraft)
|
||||||
|
- [Install minecraft](#install-minecraft)
|
||||||
|
- [Create the minecraft user](#create-the-minecraft-user)
|
||||||
|
- [Convert Compose to Quadlet](#convert-compose-to-quadlet)
|
||||||
|
- [Install Quadlets](#install-quadlets)
|
||||||
|
- [Upgrade Quadlets](#upgrade-quadlets)
|
||||||
|
- [Expose minecraft](#expose-minecraft)
|
||||||
|
- [Backup minecraft](#backup-minecraft)
|
||||||
|
|
||||||
|
## Install minecraft
|
||||||
|
|
||||||
|
Find and replace minecraft with the name of the service.
|
||||||
|
|
||||||
|
### Create the minecraft user
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# SSH into your podman server as root
|
||||||
|
useradd minecraft
|
||||||
|
loginctl enable-linger $(id -u minecraft)
|
||||||
|
systemctl --user --machine=minecraft@.host enable podman-restart
|
||||||
|
systemctl --user --machine=minecraft@.host enable --now podman.socket
|
||||||
|
mkdir -p /home/minecraft/.config/containers/systemd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Convert Compose to Quadlet
|
||||||
|
|
||||||
|
Create a folder called `quadlets` in your podman_minecraft project.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Generate the systemd service
|
||||||
|
podman run \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--userns keep-id \
|
||||||
|
--rm \
|
||||||
|
-v $(pwd)/active/container_minecraft:/compose \
|
||||||
|
-v $(pwd)/active/container_minecraft/quadlets:/quadlets \
|
||||||
|
quay.io/k9withabone/podlet \
|
||||||
|
-f /quadlets \
|
||||||
|
-i \
|
||||||
|
--overwrite \
|
||||||
|
compose /compose/compose.yaml
|
||||||
|
|
||||||
|
# Copy the files to the server
|
||||||
|
scp -r active/container_minecraft/quadlets/. minecraft:~/.config/containers/systemd/
|
||||||
|
```
|
||||||
|
|
||||||
|
### Install Quadlets
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ssh minecraft
|
||||||
|
|
||||||
|
export GAME_SERVER_NAME=testcraft
|
||||||
|
mkdir $GAME_SERVER_NAME
|
||||||
|
|
||||||
|
# Download the server jar (only needed once)
|
||||||
|
podman run \
|
||||||
|
-it \
|
||||||
|
--rm \
|
||||||
|
-e SERVER_VERSION=1.21.8 \
|
||||||
|
-v $(pwd)/$GAME_SERVER_NAME:/downloads \
|
||||||
|
--security-opt label=disable \
|
||||||
|
--userns keep-id \
|
||||||
|
docker.io/ducoterra/get-minecraft:latest
|
||||||
|
|
||||||
|
systemctl --user daemon-reload
|
||||||
|
systemctl --user restart $GAME_SERVER_NAME
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upgrade Quadlets
|
||||||
|
|
||||||
|
```bash
|
||||||
|
scp -r quadlets/. minecraft:~/.config/containers/systemd/
|
||||||
|
ssh minecraft systemctl --user daemon-reload
|
||||||
|
ssh minecraft systemctl --user restart minecraft
|
||||||
|
```
|
||||||
|
|
||||||
|
## Expose minecraft
|
||||||
|
|
||||||
|
1. Create your minecraft ddns record first [following these docs](/active/container_ddns/ddns.md#)
|
||||||
|
2. Create a SRV record in your DNS provider like the following:
|
||||||
|
|
||||||
|
active/container_minecraft/secrets/reeseapps_records.json:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"Comment": "CREATE/UPSERT/DELETE a record ",
|
||||||
|
"Changes": [
|
||||||
|
{
|
||||||
|
"Action": "UPSERT",
|
||||||
|
"ResourceRecordSet": {
|
||||||
|
"Name": "_minecraft._tcp.testcraft.reeseapps.com",
|
||||||
|
"Type": "SRV",
|
||||||
|
"TTL": 300,
|
||||||
|
"ResourceRecords": [
|
||||||
|
{
|
||||||
|
"Value": "0 5 25566 minecraft.reeseapps.com"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/container_minecraft/secrets/reeseapps_records.json
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Test your record with `nslookup`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
nslookup -q=srv _minecraft._tcp.testcraft.reeseapps.com
|
||||||
|
```
|
||||||
|
|
||||||
|
4. Access your server at your domain "testcraft.reeseapps.com"
|
||||||
|
|
||||||
|
## Backup minecraft
|
||||||
|
|
||||||
|
Follow the Borg [Create a Backup Service Docs](/active/systemd_borg/borg.md#create-a-backup-service)
|
||||||
14
active/container_minecraft/quadlets/nimcraft.container
Normal file
14
active/container_minecraft/quadlets/nimcraft.container
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[Container]
|
||||||
|
Environment=MAX_RAM=4 MIN_RAM=1
|
||||||
|
Image=gitea.reeseapps.com/services/minecraft:c1ca80b09b4645888e550efb0a2700b2ec1f1645
|
||||||
|
PodmanArgs=--interactive --tty
|
||||||
|
PublishPort=25566:25565
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
UserNS=keep-id
|
||||||
|
Volume=/home/minecraft/nimcraft:/mc_data
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
14
active/container_minecraft/quadlets/testcraft.container
Normal file
14
active/container_minecraft/quadlets/testcraft.container
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
[Container]
|
||||||
|
Environment=MAX_RAM=4 MIN_RAM=1
|
||||||
|
Image=gitea.reeseapps.com/services/minecraft:c1ca80b09b4645888e550efb0a2700b2ec1f1645
|
||||||
|
PodmanArgs=--interactive --tty
|
||||||
|
PublishPort=25565:25565
|
||||||
|
SecurityLabelDisable=true
|
||||||
|
UserNS=keep-id
|
||||||
|
Volume=/home/minecraft/testcraft:/mc_data
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Restart=always
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=default.target
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user