Compare commits

..

87 Commits

Author SHA1 Message Date
380d8f8e48 get cloud-init working consistently
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 55s
2025-11-26 13:54:01 -05:00
07a297f818 Add better intro to README
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 59s
2025-11-21 13:43:23 -05:00
d7224b038b switch to custom caddy with route53 dns support
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 2m21s
2025-11-20 17:58:53 -05:00
fc62219db7 update jellyfin to 10.11.3 2025-11-20 07:24:26 -05:00
2ebd97c345 update gitea to 2.5.1 2025-11-20 07:06:18 -05:00
270e86bfd0 clean up templates 2025-11-20 06:57:11 -05:00
7305e3a35b add cloud-init notes for amazon linux on qemu 2025-11-20 06:56:56 -05:00
aabbd8286f update immich to 2.3.1 2025-11-20 06:56:27 -05:00
37f7d442a1 add missing return 2025-11-20 06:56:09 -05:00
3ff805fa39 clamav don't alert on max size
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m33s
2025-11-18 10:24:53 -05:00
1ae62e70ed move systemd prefixes to software prefixes 2025-11-18 10:01:18 -05:00
91f4687c07 add esphome display notes 2025-11-18 09:57:49 -05:00
dc2df62d04 add borg repo update notes 2025-11-18 09:57:14 -05:00
b75aac76c2 add firewalld inspection notes 2025-11-18 09:57:00 -05:00
5161dced6e add clamav ignore signatures notes 2025-11-18 09:56:44 -05:00
d9ed144578 add lvfs-testing framework 16 update notes 2025-11-18 09:56:08 -05:00
5516f9530b add virsh networking notes
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m50s
2025-11-13 17:05:06 -05:00
621be95870 add fedora43 and default credentials to osbuild 2025-11-13 16:53:39 -05:00
b526901546 add clamav docs 2025-11-13 16:53:02 -05:00
b328081b59 upgrade immich 2025-11-13 16:52:45 -05:00
113b859927 Clarify docker integration with podman on fedora 2025-11-13 16:51:36 -05:00
57ff005186 add selinux files to gitignore 2025-11-13 16:51:13 -05:00
7ccedb9768 move selinux, firewalld, k3s 2025-11-13 16:51:02 -05:00
ef527abef4 expand yubikey docs to include gpg 2025-11-13 16:49:39 -05:00
75f4aaebf1 add clamav docs
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 39s
2025-11-07 13:29:02 -05:00
1396e09227 add gitea demo values compatible with metallb
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 55s
2025-11-03 09:54:40 -05:00
cbe8c4a369 add l2advertisement to metallb address pool k0s config 2025-11-03 09:54:21 -05:00
2f88c75655 Fix super old version of metallb in k0s 2025-11-03 09:42:09 -05:00
0f4b73720c Update metallb configmap to custom resource 2025-11-03 09:24:15 -05:00
b97f41eb70 fedora updates and systemd renames
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 58s
2025-10-30 22:51:53 -04:00
6df02e8dff k0s updates 2025-10-30 22:51:17 -04:00
57ae6b7e72 update dd command for bootable arch installer
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 56s
2025-10-23 10:41:11 -04:00
e3ba1759c4 init software_steam 2025-10-23 10:40:56 -04:00
af70d1d396 add secure boot note to systemd_qemu 2025-10-23 10:40:47 -04:00
5b474c7190 init software_k0s 2025-10-23 10:40:20 -04:00
d94cd01008 init software_virsh 2025-10-23 10:40:10 -04:00
afb27c512c add composer-cli list image command 2025-10-23 10:40:00 -04:00
a500c8a572 add yubikey gpg notes 2025-10-23 10:39:34 -04:00
c5748d81da add tagline to image builder docs
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 3m58s
2025-10-22 16:58:59 -04:00
b38390029f update README ssh notes 2025-10-22 16:57:53 -04:00
b116ea73ec add gpg change key password 2025-10-22 16:57:38 -04:00
920aeef7f3 update key names and add ssh public keys 2025-10-22 16:57:19 -04:00
9038962f29 add image builder notes 2025-10-22 16:57:05 -04:00
3fed164193 add gpg import keys, delete keys, sign, and encrypt files
Some checks failed
Podman DDNS Image / build-and-push-ddns (push) Failing after 2s
2025-10-20 17:25:26 -04:00
487e03c0bd update codium extension list 2025-10-20 12:23:23 -04:00
cf0a7373d4 move pgp to gpg and add export and expiration notes 2025-10-20 12:18:49 -04:00
e0adee5362 add gpg notes and gpg public keys 2025-10-19 21:36:09 -04:00
8f3e624925 Add gpg commit signing to README 2025-10-19 21:01:17 -04:00
e1e551c5cc add inputrc, find, and tmux notes to README
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m55s
2025-10-14 12:38:59 -04:00
23d3949421 improve podman template with checkboxes and better descriptions 2025-10-14 12:38:42 -04:00
714dd32ff6 convert README ssh key gen to script 2025-10-14 12:38:22 -04:00
8035fa38dc update freeipa smart card notes 2025-10-14 12:38:13 -04:00
b91cc1adc3 add install wakeword notes to wyoming 2025-10-14 12:37:55 -04:00
4fe56de990 update pytorch versions required by stable diffusion webui forge 2025-10-14 12:37:33 -04:00
9ef631b266 clarify difference between server and laptop in borg notes 2025-10-14 12:37:14 -04:00
8c39f749c7 add yt-dlp notes 2025-10-14 12:36:56 -04:00
1361c726d9 add bluetooth wireshark notes 2025-10-14 12:36:50 -04:00
1879158b6c add system monitor template backups 2025-10-14 12:36:41 -04:00
7b9968762a add ffpmeg notes 2025-10-14 12:36:29 -04:00
250ffeb266 add pgp notes 2025-10-14 12:36:22 -04:00
de6c1941c5 add firewall rules notes to podman ollama 2025-10-14 12:36:13 -04:00
9bc09a4b98 fix nextcloud deploy user 2025-10-14 12:36:01 -04:00
79377b3653 simplify podman minecraft deploy 2025-10-14 12:35:35 -04:00
d44bca3f2b add podman matrix 2025-10-14 12:35:23 -04:00
660735f0ae simplify podman jellyfin deploy 2025-10-14 12:35:10 -04:00
6dfd30e175 Upgrade podman immich to v2 2025-10-14 12:34:59 -04:00
0e5250d84d fix podman gitea volume permissions 2025-10-14 12:34:32 -04:00
556149c583 remove ansible podman ddns record 2025-10-14 12:34:17 -04:00
72e13f53aa fix caddy ansible playbook link 2025-10-14 12:33:55 -04:00
e9c68abeb9 updates to creating podman bricktracker user 2025-10-14 12:33:44 -04:00
69e8e89e72 truenas iscsi log notes 2025-10-14 12:33:29 -04:00
85e74541c2 rhel initialization with smart card notes 2025-10-14 12:33:11 -04:00
cb66fb6195 fedora snapper, tuned, and selinux policies 2025-10-14 12:32:57 -04:00
8d98cd06fa fix title conflict in arch docs 2025-10-14 12:32:37 -04:00
a85627b3b2 init truenas server device notes 2025-10-14 12:32:12 -04:00
f046e6edc2 add shelly bthome notes and more detailed instructions for existing devices 2025-10-14 12:32:01 -04:00
a32f055ede add home assistant unifi cameras, datetimes, and voice notes 2025-10-14 12:31:35 -04:00
0c6509cc17 add esphome notes 2025-10-14 12:31:14 -04:00
82b60c086c add alicia's printer 2025-10-14 12:31:05 -04:00
999869cab6 add 3dserver device 2025-10-14 12:30:55 -04:00
548cdc8b87 immich init 2025-10-01 22:37:49 -04:00
4832b283bb update podman rootless gitea docs
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 53s
2025-09-07 15:15:28 -04:00
9e83048248 fix gitea ssh key change on every reboot
All checks were successful
Podman DDNS Image / build-and-push-ddns (push) Successful in 1m3s
2025-07-30 22:40:20 -04:00
f2d684fa7c add scrubbing time to important dates and times 2025-07-25 20:42:31 -04:00
7980bfb381 add example json aws route53 record for minecraft 2025-07-25 20:42:18 -04:00
20690c48e5 add section on decrypting and scrubbing pools 2025-07-25 20:42:04 -04:00
ca582333f1 move some fedora software from server to software md 2025-07-25 20:41:51 -04:00
146 changed files with 6821 additions and 722 deletions


@@ -0,0 +1,30 @@
name: Podman DDNS Image
run-name: Build and Push the Custom Caddy Image with Route53 DNS Certbot
on:
  push:
    paths:
      - active/podman_caddy/**
      - .gitea/workflows/caddy.yaml
  schedule:
    - cron: '@daily'
jobs:
  build-and-push-ddns:
    runs-on: ubuntu-latest
    if: gitea.ref == 'refs/heads/main'
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - name: Login to Gitea Registry
        uses: docker/login-action@v2
        with:
          registry: gitea.reeseapps.com
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}
      - name: Build and push Docker image
        uses: https://github.com/docker/build-push-action@v5
        with:
          context: ${{ gitea.workspace }}/active/podman_caddy
          file: ${{ gitea.workspace }}/active/podman_caddy/Containerfile
          push: true
          tags: "gitea.reeseapps.com/services/caddy:latest,gitea.reeseapps.com/services/caddy:${{gitea.sha}}"
          no-cache: true
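
Once this workflow runs, the image can be pulled using the tags it publishes:

```bash
podman pull gitea.reeseapps.com/services/caddy:latest
```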

.gitignore

@@ -7,4 +7,7 @@ __pycache__/
.pytest_cache/
.venv/
.mypy_cache
TODO.md
TODO.md
eicar.com
*.pp
*.mod


@@ -1,14 +1,17 @@
charliermarsh.ruff
continue.continue
davidanson.vscode-markdownlint
ms-python.black-formatter
eamodio.gitlens
franneck94.vscode-python-config
franneck94.vscode-python-dev-extension-pack
ms-pyright.pyright
ms-python.debugpy
ms-python.mypy-type-checker
ms-python.python
ms-toolsai.jupyter
ms-toolsai.jupyter-keymap
ms-toolsai.jupyter-renderers
ms-toolsai.vscode-jupyter-cell-tags
ms-toolsai.vscode-jupyter-slideshow
ms-python.vscode-python-envs
njpwerner.autodocstring
njqdev.vscode-python-typehint
redhat.vscode-yaml
stkb.rewrap
streetsidesoftware.code-spell-checker
yzhang.markdown-all-in-one
tamasfe.even-better-toml
vue.volar
yzhang.markdown-all-in-one

README.md

@@ -1,13 +1,34 @@
# Homelab
A project to store homelab stuff.
Welcome to my homelab!
Just here for the Arch distoolbox?
This repo is an in-flux collection of my personal notes, docs, and tutorials on
things I find interesting and self-host.
[Arch Distoolbox](active/software_distoolbox/distoolbox.md)
Take a look around!
![Arch Toolbox
Status](https://gitea.reeseapps.com/services/homelab/actions/workflows/distoolbox.yaml/badge.svg?branch=main)
- "Active" projects (/active) are in use today and generally fall into these
categories:
- `aws_` is for aws notes
- `device_` is for hardware
- `kubernetes_` is for helm charts or other kubernetes hosted software
- `os_` is for operating system setup guides and notes
- `podman_` is for containerized projects
- `software_` is for cli tools, projects without a specific way to host them,
or other misfits
All active projects have a markdown file named after the project. This allows
quick access via shortcuts like `ctrl + p` in vscode. For example, to check my
notes for `virsh`, I type `ctrl + p`, then "virsh", to open "virsh.md".
"Retired" projects (/retired) is a graveyard of things I didn't want to delete.
"Template" projects (/templates) are quick templates for creating new active
projects with sane defaults.
I keep my GPG and SSH keys in `keys` if you want to add those to your keyring
or give me access to your servers.
## Table of Contents
@@ -15,7 +36,12 @@ Status](https://gitea.reeseapps.com/services/homelab/actions/workflows/distoolbo
- [Table of Contents](#table-of-contents)
- [Fun Facts](#fun-facts)
- [Keyboard Shortcuts](#keyboard-shortcuts)
- [inputrc](#inputrc)
- ["find ." shortcuts](#find--shortcuts)
- [tmux](#tmux)
- [bash](#bash)
- [SSH Setup](#ssh-setup)
- [Git GPG Commit Signing](#git-gpg-commit-signing)
- [Important Dates and Times](#important-dates-and-times)
- [Project Lifecycle](#project-lifecycle)
- [Project Types](#project-types)
@@ -35,43 +61,112 @@ On linux, <kbd>ctrl</kbd>+<kbd>shift</kbd>+<kbd>u</kbd>, then, while holding
In vim: `esc`, then `G`, then `o` will take you to the end of the file and insert a new line.
## SSH Setup
### inputrc
Add this to your `~/.inputrc` to allow ctrl + backspace to delete whole words.
```bash
export REMOTE_USER=${USER}
export REMOTE_HOST=something.com
export REMOTE_PORT=22
"\C-h": backward-kill-word
```
# The following is generated by the above variables. No tweaks necessary.
export KEY_NAME=~/.ssh/id_${REMOTE_USER}_${REMOTE_HOST}
export KEY_COMMENT="${USER}@${HOSTNAME}:${REMOTE_USER}@${REMOTE_HOST}"
### "find ." shortcuts
```bash
# Change file mode for a bunch of directories
find . -type d -exec chmod 755 {} \;
```
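
The companion for files, assuming the usual 644 mode is what you want:

```bash
# Change file mode for a bunch of files
find . -type f -exec chmod 644 {} \;
```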
### tmux
- Vertical: ctrl + b + "
- Horizontal: ctrl + b + %
- Even Horizontal Distribution: ctrl + b + alt + 1
- Even Vertical Distribution: ctrl + b + alt + 2
- Swap pane order: ctrl + b + : -> swap-pane -t 0
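
The same operations can be scripted from a shell inside a session:

```bash
tmux split-window                    # same as ctrl + b + "
tmux split-window -h                 # same as ctrl + b + %
tmux select-layout even-horizontal   # ctrl + b + alt + 1
tmux select-layout even-vertical     # ctrl + b + alt + 2
tmux swap-pane -t 0                  # swap the current pane with pane 0
```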
### bash
<https://tecadmin.net/bash-special-variables/>
Here are some handy references for default bash variables
```text
$0 The name of the script being executed.
$1-$9 The first nine command-line arguments.
$# The number of command-line arguments.
$* All command-line arguments as a single string.
$@ All command-line arguments as an array.
$? The exit status of the last executed command.
$$ The process ID of the current shell.
$! The process ID of the last background command.
$- Shows the current shell options or flags.
```
And here are the meanings of the shell options
```text
h Remember the location of commands as they are looked up
i Interactive shell
m Job control is enabled
B Brace expansion is enabled
H History substitution is enabled
```
So to check if you are in an interactive shell:
```bash
[[ $- == *i* ]] && some_command_here
```
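
A throwaway script to see several of these at once (save as `args.sh`, run with a few arguments):

```bash
#!/usr/bin/env bash
echo "script: $0"
echo "first arg: $1"
echo "arg count: $#"
echo "all args: $*"
echo "pid: $$"
false
echo "last exit status: $?"  # prints 1, the status of 'false'
```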
## SSH Setup
Generate a key (password protect it!)
```bash
# Pick one of the below key types
# ed25519
ssh-keygen -C ${KEY_COMMENT} -f ${KEY_NAME} -t ed25519
ssh-keygen -C ssh@ducoterra.net -t ed25519
# rsa 4096
ssh-keygen -C ${KEY_COMMENT} -f ${KEY_NAME} -t rsa -b 4096
ssh-keygen -C ssh@ducoterra.net -t rsa -b 4096
cat <<EOF >> ~/.ssh/config
# Inspect a key
ssh-keygen -l -f ~/.ssh/id_rsa
Host ${REMOTE_HOST}
Hostname ${REMOTE_HOST}
IdentityFile ${KEY_NAME}
User ${REMOTE_USER}
Port ${REMOTE_PORT}
EOF
# Change the password
ssh-keygen -p -f ~/.ssh/id_rsa
```
In your ~/.ssh/config, add the following line to set the default key
```conf
IdentityFile ~/.foo/identity
```
Then add a host to your local computer
```conf
Host <hostname>
  Hostname <host.something.com or IP address>
  User <remote user>
  Port <remote port>
```
And copy the key to a remote computer
```bash
# Copy the generated key to the server using password auth. Assumes password auth enabled.
ssh-copy-id -o PubkeyAuthentication=no -i ${KEY_NAME} ${REMOTE_USER}@${REMOTE_HOST}
ssh-copy-id -f -i ~/.ssh/id_ed25519 ${REMOTE_USER}@${REMOTE_HOST}
# Log into the server with your key
ssh -i ${KEY_NAME} ${KEY_COMMENT}
ssh -i ${KEY_NAME} ${REMOTE_HOST}
# Copy authorized_keys to root
sudo mkdir -p /root/.ssh
sudo cp ~/.ssh/authorized_keys /root/.ssh/authorized_keys
exit
# login and disable password auth
ssh ${REMOTE_HOST}
mkdir -p /etc/ssh/sshd_config.d
echo "PasswordAuthentication no" > /etc/ssh/sshd_config.d/01-prohibit-password.conf
systemctl restart sshd
@@ -87,17 +182,31 @@ ssh -o PubkeyAuthentication=no ducoterra@${SSH_HOST}.reeselink.com
ssh $SSH_HOST
```
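
To confirm the drop-in took effect, print sshd's effective configuration:

```bash
sudo sshd -T | grep -i passwordauthentication
```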
## Git GPG Commit Signing
1. Use `gpg --list-key 'git@ducoterra.net'` to find your key
2. Use `git config --global user.signingkey 0A46826A...` to set the signing key
3. Use `gpg --export -a 'git@ducoterra.net'` to export the key to copy into Gitea/Github/Gitlab
Now you can sign commits with `git commit -S`.
Alternatively, you can sign every commit by default with `git config --global commit.gpgsign true`.
You can verify a commit with `git verify-commit e1e551c`. If the commit is
signed you'll see an output. If not, nothing will show.
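
The whole setup condensed (the key ID is illustrative; take it from the list output):

```bash
gpg --list-key 'git@ducoterra.net'            # find the key ID
git config --global user.signingkey <key-id>  # from the output above
git config --global commit.gpgsign true       # sign every commit by default
gpg --export -a 'git@ducoterra.net'           # public key for Gitea/Github/Gitlab
git verify-commit HEAD                        # spot check
```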
## Important Dates and Times
| Time | Day | Description |
| ----- | -------- | ------------------------------ |
| 00:00 | All | Automated builds |
| 00:00 | All | NAS Snapshots |
| 02:00 | All | Backups |
| 04:00 | Saturday | Server Hardware Updates |
| 05:00 | Saturday | Server VM Updates |
| 05:00 | All | Unifi Protect Firmware Updates |
| 06:00 | All | Unifi Network Firmware Updates |
| Time | Day | Description |
| ----- | -------- | ---------------------------------- |
| 00:00 | All | Automated builds |
| 00:00 | All | NAS Snapshots |
| 02:00 | All | Backups |
| 04:00 | All | Bare Metal Server Security Updates |
| 05:00 | All | VM Server Security Updates |
| 05:00 | All | Unifi Protect Firmware Updates |
| 06:00 | All | Unifi Network Firmware Updates |
| 06:00 | Saturday | Truenas Disk Scrub |
## Project Lifecycle
@@ -123,8 +232,7 @@ All projects will be prefixed with one of the following categories:
- `device_`
- `os_`
- `cloud_`
- `systemd_`
- `software_`
- `podman_`
- `docker_`
- `kubernetes_`
@@ -149,15 +257,12 @@ be prefixed with the cloud's name, not the word "cloud". So AWS services will
be prefixed with `aws_` and azure would be `azure_`. This should make them more
searchable.
`systemd_` projects are designed to be installed with ansible and run via
systemd on a linux VM or other linux hardware.
`software_` projects record configuration for common software agnostic to
operating system or linux flavor.
`podman_` projects are either designed to be run as quadlets or as podman
containers outright.
`docker_` projects are either docker-compose or some form of docker run
command.
`kubernetes_` projects are helm, kustomize, kubectl, or some other kubernetes
compliant deployment.


@@ -0,0 +1,14 @@
# 3D Server Hardware
## Motherboard
B650 GAMING X AX rev 1.5
<https://www.gigabyte.com/Motherboard/B650-GAMING-X-AX-rev-15/support#dl>
- Enable PBO
- Enable XMP
- Enable SVM
- Enable PCIe x4x4x4x4 bifurcation
- Enable Power always back on
- Fans to full speed


@@ -0,0 +1,11 @@
# Epson ET 2800
## Printer Setup
1. Download and install the drivers at <https://support.epson.net/linux/Printer/LSB_distribution_pages/en/escpr.php>
2. Settings -> Printers -> Add
3. Select LPD/LPR Host or Printer
4. Enter the address: `lpd://<ip_address>`
5. Select Epson, then select Epson ET-2800 Series
6. Save
7. Print
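
If you prefer the CUPS CLI, a sketch (assumes the Epson driver above installed its PPD; the queue name is arbitrary):

```bash
lpinfo -m | grep -i 2800   # find the driver URI CUPS knows about
sudo lpadmin -p ET2800 -E -v "lpd://<ip_address>" -m <driver-from-above>
```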

active/device_esphome/.gitignore

@@ -0,0 +1,5 @@
# Gitignore settings for ESPHome
# This is an example and may include too much for your use-case.
# You can modify this file to suit your needs.
/.esphome/
/secrets.yaml


@@ -0,0 +1,370 @@
substitutions:
name: m5stack-atom-echo
friendly_name: M5Stack Atom Echo
esphome:
name: ${name}
name_add_mac_suffix: true
friendly_name: ${friendly_name}
min_version: 2025.5.0
esp32:
board: m5stack-atom
cpu_frequency: 240MHz
framework:
type: esp-idf
logger:
api:
ota:
- platform: esphome
id: ota_esphome
wifi:
ap:
captive_portal:
button:
- platform: factory_reset
id: factory_reset_btn
name: Factory reset
i2s_audio:
- id: i2s_audio_bus
i2s_lrclk_pin: GPIO33
i2s_bclk_pin: GPIO19
microphone:
- platform: i2s_audio
id: echo_microphone
i2s_din_pin: GPIO23
adc_type: external
pdm: true
sample_rate: 16000
correct_dc_offset: true
speaker:
- platform: i2s_audio
id: echo_speaker
i2s_dout_pin: GPIO22
dac_type: external
bits_per_sample: 16bit
sample_rate: 16000
channel: stereo # The Echo has poor playback audio quality when using mono audio
buffer_duration: 60ms
media_player:
- platform: speaker
name: None
id: echo_media_player
announcement_pipeline:
speaker: echo_speaker
format: WAV
codec_support_enabled: false
buffer_size: 6000
volume_min: 0.4
files:
- id: timer_finished_wave_file
file: https://github.com/esphome/wake-word-voice-assistants/raw/main/sounds/timer_finished.wav
on_announcement:
- if:
condition:
- microphone.is_capturing:
then:
- script.execute: stop_wake_word
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_idle:
- script.execute: start_wake_word
- script.execute: reset_led
voice_assistant:
id: va
micro_wake_word:
microphone:
microphone: echo_microphone
channels: 0
gain_factor: 4
media_player: echo_media_player
noise_suppression_level: 2
auto_gain: 31dBFS
on_listening:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Slow Pulse"
on_stt_vad_end:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Fast Pulse"
on_tts_start:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_end:
# Handle the "nevermind" case where there is no announcement
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Restart only mWW if enabled; streaming wake words automatically restart
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- wait_until:
- and:
- not:
voice_assistant.is_running:
- not:
speaker.is_playing:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- script.execute: reset_led
on_error:
- light.turn_on:
id: led
red: 100%
green: 0%
blue: 0%
brightness: 100%
effect: none
- delay: 2s
- script.execute: reset_led
on_client_connected:
- delay: 2s # Give the api server time to settle
- script.execute: start_wake_word
on_client_disconnected:
- script.execute: stop_wake_word
on_timer_finished:
- script.execute: stop_wake_word
- wait_until:
not:
microphone.is_capturing:
- switch.turn_on: timer_ringing
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 0%
brightness: 100%
effect: "Fast Pulse"
- wait_until:
- switch.is_off: timer_ringing
- light.turn_off: led
- switch.turn_off: timer_ringing
binary_sensor:
# button does the following:
# short click - stop a timer
# if no timer then restart either microwakeword or voice assistant continuous
- platform: gpio
pin:
number: GPIO39
inverted: true
name: Button
disabled_by_default: true
entity_category: diagnostic
id: echo_button
on_multi_click:
- timing:
- ON for at least 50ms
- OFF for at least 50ms
then:
- if:
condition:
switch.is_on: timer_ringing
then:
- switch.turn_off: timer_ringing
else:
- script.execute: start_wake_word
- timing:
- ON for at least 10s
then:
- button.press: factory_reset_btn
light:
- platform: esp32_rmt_led_strip
id: led
name: None
disabled_by_default: true
entity_category: config
pin: GPIO27
default_transition_length: 0s
chipset: SK6812
num_leds: 1
rgb_order: grb
effects:
- pulse:
name: "Slow Pulse"
transition_length: 250ms
update_interval: 250ms
min_brightness: 50%
max_brightness: 100%
- pulse:
name: "Fast Pulse"
transition_length: 100ms
update_interval: 100ms
min_brightness: 50%
max_brightness: 100%
script:
- id: reset_led
then:
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 100%
green: 89%
blue: 71%
brightness: 60%
effect: none
else:
- if:
condition:
- lambda: return id(wake_word_engine_location).state != "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 100%
brightness: 60%
effect: none
else:
- light.turn_off: led
- id: start_wake_word
then:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- id: stop_wake_word
then:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "On device";
then:
- micro_wake_word.stop:
switch:
- platform: template
name: Use listen light
id: use_listen_light
optimistic: true
restore_mode: RESTORE_DEFAULT_ON
entity_category: config
on_turn_on:
- script.execute: reset_led
on_turn_off:
- script.execute: reset_led
- platform: template
id: timer_ringing
optimistic: true
restore_mode: ALWAYS_OFF
on_turn_off:
# Turn off the repeat mode and disable the pause between playlist items
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
# Stop playing the alarm
- media_player.stop:
announcement: true
on_turn_on:
# Turn on the repeat mode and pause for 1000 ms between playlist items/repeats
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
- media_player.speaker.play_on_device_media_file:
media_file: timer_finished_wave_file
announcement: true
- delay: 15min
- switch.turn_off: timer_ringing
select:
- platform: template
entity_category: config
name: Wake word engine location
id: wake_word_engine_location
optimistic: true
restore_value: true
options:
- In Home Assistant
- On device
initial_option: On device
on_value:
- if:
condition:
lambda: return x == "In Home Assistant";
then:
- micro_wake_word.stop:
- delay: 500ms
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- if:
condition:
lambda: return x == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- delay: 500ms
- micro_wake_word.start:
micro_wake_word:
on_wake_word_detected:
- voice_assistant.start:
wake_word: !lambda return wake_word;
vad:
models:
- model: okay_nabu
- model: hey_mycroft
- model: hey_jarvis


@@ -0,0 +1,249 @@
# ESP32
- [ESP32](#esp32)
- [Install](#install)
- [Devices](#devices)
- [Lilygo tdongle](#lilygo-tdongle)
- [Local Flashing](#local-flashing)
- [Adding a New Device](#adding-a-new-device)
- [Controlling Home Assistant](#controlling-home-assistant)
- [Configuration Sections](#configuration-sections)
- [esphome](#esphome)
- [esp32](#esp32-1)
- [logger](#logger)
- [api](#api)
- [wifi](#wifi)
- [ota](#ota)
- [captive portal](#captive-portal)
- [button](#button)
- [i2s audio](#i2s-audio)
- [microphone](#microphone)
- [speaker](#speaker)
- [media player](#media-player)
- [voice assistant](#voice-assistant)
- [micro wake word](#micro-wake-word)
- [light](#light)
- [binary sensor](#binary-sensor)
- [lambda](#lambda)
- [Display](#display)
## Install
```bash
# Check that you have python 3.11 installed
uv python list --only-installed
# Create the venv (python 3.11 is recommended in the docs)
uv venv --python 3.11
# Install esphome
uv pip install esphome wheel pip
source .venv/bin/activate
```
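
A quick sanity check that the venv works and a config parses (validation only, no flashing):

```bash
esphome version
esphome config m5stack-atom-echo.yaml  # parse and validate without flashing
```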
## Devices
### Lilygo tdongle
Display: 80 X 160
## Local Flashing
Make sure your permissions are set correctly
```bash
sudo usermod -a -G dialout ducoterra
```
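
The group change applies on your next login; to verify (device node names vary by board):

```bash
id -nG | grep -w dialout                      # confirm membership after re-login
ls -l /dev/ttyUSB* /dev/ttyACM* 2>/dev/null   # serial devices should be group dialout
```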
Then "run" your config file
```bash
cd active/device_esp32
uv venv
uv pip install esphome
source .venv/bin/activate
esphome run m5stack-atom-echo.yaml
```
## Adding a New Device
1. Create a new yaml configuration file called "my-device-device-type.yaml"
## Controlling Home Assistant
<https://esphome.io/components/api/#api-actions>
## Configuration Sections
<https://esphome.io/components/>
### esphome
### esp32
<https://esphome.io/components/esp32/#configuration-variables>
### logger
<https://esphome.io/components/logger/>
### api
<https://esphome.io/components/api/>
### wifi
<https://esphome.io/components/wifi/>
### ota
<https://esphome.io/components/ota/>
<https://esphome.io/components/ota/esphome/>
### captive portal
<https://esphome.io/components/captive_portal/>
### button
<https://esphome.io/components/button/>
### i2s audio
<https://esphome.io/components/i2s_audio/>
### microphone
<https://esphome.io/components/microphone/>
<https://esphome.io/components/microphone/i2s_audio/>
### speaker
<https://esphome.io/components/speaker/i2s_audio/>
### media player
<https://esphome.io/components/media_player/speaker/>
Sometimes you'll need to convert media files to a supported codec.
```bash
ffmpeg -i input.flac output.wav
```
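
The speaker configs in this repo run at 16 kHz, so resampling during conversion may help (an assumption; adjust `-ac` for mono vs stereo):

```bash
ffmpeg -i input.flac -ar 16000 -ac 1 output.wav
```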
To play media on other devices from Home Assistant, use an action like the following:
```yaml
action: media_player.play_media
target:
  entity_id: media_player.kitchen_google_home
data:
  media_content_type: "audio/wav"
  media_content_id: "media-source://media_source/local/wake_word_triggered.wav"
```
### voice assistant
<https://esphome.io/components/voice_assistant/>
In Home Assistant's `configuration.yaml`, add the following to record your
voice requests as audio files you can listen back to:
```yaml
assist_pipeline:
  debug_recording_dir: /share/assist_pipeline
```
### micro wake word
<https://esphome.io/components/micro_wake_word/>
### light
<https://esphome.io/components/light/#light-effects>
### binary sensor
<https://esphome.io/components/binary_sensor/>
### lambda
<https://esphome.io/automations/templates/#config-lambda>
> id(...) is a helper function that makes ESPHome fetch an object with the
> supplied ID (which you defined somewhere else, like top_end_stop) and lets
> you call any of ESPHome's many APIs directly. For example, here we're
> retrieving the current state of the end stop using .state and using it to
> construct our cover state.
### Display
Display pages
```yaml
display:
  - platform: st7735
    spi_id: spi_lcd
    model: "INITR_MINI160X80"
    reset_pin: GPIO1
    cs_pin: GPIO4
    dc_pin: GPIO2
    rotation: 270
    device_width: 82
    device_height: 161
    col_start: 0
    row_start: 0
    eight_bit_color: true
    invert_colors: true
    use_bgr: true
    auto_clear_enabled: true
    id: my_display
    pages:
      - id: page1
        lambda: |-
          it.print(0, 10, id(font_roboto), "Connecting to");
          it.print(0, 30, id(font_roboto), "Home Assistant...");
      - id: page2
        lambda: |-
          it.print(0, 10, id(font_roboto), "Configuring");
          it.print(0, 30, id(font_roboto), "sensors...");
      - id: page3
        lambda: |-
          it.print(0, 10, id(font_roboto), "Loading");
          it.print(0, 30, id(font_roboto), "important");
          it.print(0, 50, id(font_roboto), "update...");
      - id: page4
        lambda: |-
          it.image(0, 0, id(my_image), COLOR_OFF, COLOR_ON);
```
Switch pages
```yaml
interval:
  - interval: 5s
    then:
      - display.page.show_next: my_display
      - component.update: my_display
```
Show an image
```yaml
image:
  - file: "test_tdongle_image.png"
    type: RGB
    id: my_image
```
Specify a font
```yaml
font:
  - file: "gfonts://Roboto"
    id: font_roboto
    size: 20
```


@@ -0,0 +1,386 @@
esphome:
name: great-room-atom-echo
friendly_name: Great Room Atom Echo
esp32:
board: m5stack-atom
framework:
type: esp-idf
# Enable logging
logger:
level: debug
# Enable Home Assistant API
api:
encryption:
key: !secret great_room_atom_echo_key
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
domain: .reeselink.com
fast_connect: true
enable_btm: true
on_disconnect:
- light.turn_on:
id: led
blue: 0%
red: 100%
green: 0%
effect: "Slow Pulse"
# Enable fallback hotspot (captive portal) in case wifi connection fails
ap:
ssid: "Great-Room-Atom-Echo"
password: !secret hotspot_password
ota:
- platform: esphome
password: !secret ota_password
captive_portal:
button:
- platform: factory_reset
id: factory_reset_btn
name: Factory reset
i2s_audio:
- id: i2s_audio_bus
i2s_lrclk_pin: GPIO33
i2s_bclk_pin: GPIO19
microphone:
- platform: i2s_audio
id: echo_microphone
i2s_din_pin: GPIO23
adc_type: external
pdm: true
sample_rate: 16000
correct_dc_offset: true
speaker:
- platform: i2s_audio
id: echo_speaker
i2s_dout_pin: GPIO22
dac_type: external
bits_per_sample: 16bit
sample_rate: 16000
channel: stereo # The Echo has poor playback audio quality when using mono audio
buffer_duration: 60ms
media_player:
- platform: speaker
name: None
id: echo_media_player
announcement_pipeline:
speaker: echo_speaker
format: WAV
codec_support_enabled: false
buffer_size: 6000
volume_min: 1
volume_max: 1
volume_initial: 1
files:
- id: timer_finished_wave_file
file: https://github.com/esphome/wake-word-voice-assistants/raw/main/sounds/timer_finished.wav
on_announcement:
- if:
condition:
- microphone.is_capturing:
then:
- script.execute: stop_wake_word
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_idle:
- script.execute: start_wake_word
- script.execute: reset_led
voice_assistant:
id: va
micro_wake_word:
microphone:
microphone: echo_microphone
channels: 0
gain_factor: 64
media_player: echo_media_player
noise_suppression_level: 2
auto_gain: 31dBFS
on_listening:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Slow Pulse"
on_stt_vad_end:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Fast Pulse"
on_tts_start:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_end:
# Handle the "nevermind" case where there is no announcement
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Restart only mWW if enabled; streaming wake words automatically restart
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- wait_until:
- and:
- not:
voice_assistant.is_running:
- not:
speaker.is_playing:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- script.execute: reset_led
on_error:
- light.turn_on:
id: led
red: 100%
green: 0%
blue: 0%
brightness: 100%
effect: none
- delay: 2s
- script.execute: reset_led
on_client_connected:
- delay: 2s # Give the api server time to settle
- script.execute: start_wake_word
on_client_disconnected:
- script.execute: stop_wake_word
on_timer_finished:
- script.execute: stop_wake_word
- wait_until:
not:
microphone.is_capturing:
- switch.turn_on: timer_ringing
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 0%
brightness: 100%
effect: "Fast Pulse"
- wait_until:
- switch.is_off: timer_ringing
- light.turn_off: led
- switch.turn_off: timer_ringing
binary_sensor:
# button does the following:
# short click - stop a timer
# if no timer then restart either microwakeword or voice assistant continuous
- platform: gpio
pin:
number: GPIO39
inverted: true
name: Button
disabled_by_default: true
entity_category: diagnostic
id: echo_button
on_multi_click:
- timing:
- ON for at least 50ms
- OFF for at least 50ms
then:
- if:
condition:
switch.is_on: timer_ringing
then:
- switch.turn_off: timer_ringing
else:
- script.execute: start_wake_word
- timing:
- ON for at least 10s
then:
- button.press: factory_reset_btn
light:
- platform: esp32_rmt_led_strip
id: led
name: None
disabled_by_default: true
entity_category: config
pin: GPIO27
default_transition_length: 0s
chipset: SK6812
num_leds: 1
rgb_order: grb
effects:
- pulse:
name: "Slow Pulse"
transition_length: 250ms
update_interval: 250ms
min_brightness: 50%
max_brightness: 100%
- pulse:
name: "Fast Pulse"
transition_length: 100ms
update_interval: 100ms
min_brightness: 50%
max_brightness: 100%
script:
- id: reset_led
then:
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 100%
green: 89%
blue: 71%
brightness: 60%
effect: none
else:
- if:
condition:
- lambda: return id(wake_word_engine_location).state != "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 100%
brightness: 60%
effect: none
else:
- light.turn_off: led
- id: start_wake_word
then:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- id: stop_wake_word
then:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "On device";
then:
- micro_wake_word.stop:
switch:
- platform: template
name: Use listen light
id: use_listen_light
optimistic: true
restore_mode: RESTORE_DEFAULT_ON
entity_category: config
on_turn_on:
- script.execute: reset_led
on_turn_off:
- script.execute: reset_led
- platform: template
id: timer_ringing
optimistic: true
restore_mode: ALWAYS_OFF
on_turn_off:
# Turn off the repeat mode and disable the pause between playlist items
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
# Stop playing the alarm
- media_player.stop:
announcement: true
on_turn_on:
# Turn on the repeat mode and pause for 1000 ms between playlist items/repeats
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
- media_player.speaker.play_on_device_media_file:
media_file: timer_finished_wave_file
announcement: true
- delay: 15min
- switch.turn_off: timer_ringing
select:
- platform: template
entity_category: config
name: Wake word engine location
id: wake_word_engine_location
optimistic: true
restore_value: true
options:
- In Home Assistant
- On device
initial_option: On device
on_value:
- if:
condition:
lambda: return x == "In Home Assistant";
then:
- micro_wake_word.stop:
- delay: 500ms
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- if:
condition:
lambda: return x == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- delay: 500ms
- micro_wake_word.start:
micro_wake_word:
on_wake_word_detected:
- voice_assistant.start:
wake_word: !lambda return wake_word;
vad:
models:
- model: okay_nabu
- model: hey_mycroft
- model: hey_jarvis


@@ -0,0 +1,118 @@
esphome:
name: tdongle
friendly_name: tdongle
esp32:
board: esp32-s3-devkitc-1
framework:
type: esp-idf
flash_size: 16MB
logger:
# Enable Home Assistant API
api:
encryption:
key: !secret lilygo_tdongle_key
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
domain: .reeselink.com
fast_connect: true
enable_btm: true
id: wifithing
# on_connect:
# - component.update: my_online_image
ota:
- platform: esphome
password: !secret ota_password
captive_portal:
binary_sensor:
- platform: gpio
pin: GPIO0
name: Button
spi:
- id: spi_led
clk_pin: GPIO39
mosi_pin: GPIO40
- id: spi_lcd
clk_pin: GPIO5
mosi_pin: GPIO3
output:
- platform: ledc
frequency: 2000
pin: GPIO38
inverted: True
id: backlight_output
light:
- platform: monochromatic
output: backlight_output
name: "LCD Backlight"
id: lcd_backlight
restore_mode: ALWAYS_ON
# RGB Led, APA102 on GPIO39/GPIO40
- platform: spi_led_strip
spi_id: spi_led
num_leds: 1
name: "FastLED SPI Light"
data_rate: 1MHz # Adjust as needed, APA102 supports up to 20MHz, 1MHz is a safe starting point
display:
- platform: st7735
spi_id: spi_lcd
model: "INITR_MINI160X80"
reset_pin: GPIO1
cs_pin: GPIO4
dc_pin: GPIO2
rotation: 270
device_width: 82
device_height: 161
col_start: 0
row_start: 0
eight_bit_color: true
invert_colors: true
use_bgr: true
auto_clear_enabled: true
id: my_display
pages:
- id: page1
lambda: |-
it.print(0, 10, id(font_roboto), "Connecting to");
it.print(0, 30, id(font_roboto), "Home Assistant...");
- id: page2
lambda: |-
it.print(0, 10, id(font_roboto), "Configuring");
it.print(0, 30, id(font_roboto), "sensors...");
- id: page3
lambda: |-
it.print(0, 10, id(font_roboto), "Loading");
it.print(0, 30, id(font_roboto), "important");
it.print(0, 50, id(font_roboto), "update...");
- id: page4
lambda: |-
it.image(0, 0, id(my_image), COLOR_OFF, COLOR_ON);
image:
- file: "test_tdongle_image.png"
type: RGB
id: my_image
http_request:
font:
- file: "gfonts://Roboto"
id: font_roboto
size: 20
interval:
- interval: 5s
then:
- display.page.show_next: my_display
- component.update: my_display


@@ -0,0 +1,387 @@
esphome:
name: loft-atom-echo
friendly_name: Loft Atom Echo
esp32:
board: m5stack-atom
cpu_frequency: 240MHz
framework:
type: esp-idf
# Enable logging
logger:
level: debug
# Enable Home Assistant API
api:
encryption:
key: !secret loft_atom_echo_key
wifi:
ssid: !secret wifi_ssid
password: !secret wifi_password
domain: .reeselink.com
fast_connect: true
enable_btm: true
on_disconnect:
- light.turn_on:
id: led
blue: 0%
red: 100%
green: 0%
effect: "Slow Pulse"
# Enable fallback hotspot (captive portal) in case wifi connection fails
ap:
ssid: "Loft-Atom-Echo"
password: !secret hotspot_password
ota:
- platform: esphome
password: !secret ota_password
captive_portal:
button:
- platform: factory_reset
id: factory_reset_btn
name: Factory reset
i2s_audio:
- id: i2s_audio_bus
i2s_lrclk_pin: GPIO33
i2s_bclk_pin: GPIO19
microphone:
- platform: i2s_audio
id: echo_microphone
i2s_din_pin: GPIO23
adc_type: external
pdm: true
sample_rate: 16000
correct_dc_offset: true
speaker:
- platform: i2s_audio
id: echo_speaker
i2s_dout_pin: GPIO22
dac_type: external
bits_per_sample: 16bit
sample_rate: 16000
channel: stereo # The Echo has poor playback audio quality when using mono audio
buffer_duration: 60ms
media_player:
- platform: speaker
name: None
id: echo_media_player
announcement_pipeline:
speaker: echo_speaker
format: WAV
codec_support_enabled: false
buffer_size: 6000
volume_min: 1
volume_max: 1
volume_initial: 1
files:
- id: timer_finished_wave_file
file: https://github.com/esphome/wake-word-voice-assistants/raw/main/sounds/timer_finished.wav
on_announcement:
- if:
condition:
- microphone.is_capturing:
then:
- script.execute: stop_wake_word
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_idle:
- script.execute: start_wake_word
- script.execute: reset_led
voice_assistant:
id: va
micro_wake_word:
microphone:
microphone: echo_microphone
channels: 0
gain_factor: 64
media_player: echo_media_player
noise_suppression_level: 2
auto_gain: 31dBFS
on_listening:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Slow Pulse"
on_stt_vad_end:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
effect: "Fast Pulse"
on_tts_start:
- light.turn_on:
id: led
blue: 100%
red: 0%
green: 0%
brightness: 100%
effect: none
on_end:
# Handle the "nevermind" case where there is no announcement
- wait_until:
condition:
- media_player.is_announcing:
timeout: 0.5s
# Restart only mWW if enabled; streaming wake words automatically restart
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- wait_until:
- and:
- not:
voice_assistant.is_running:
- not:
speaker.is_playing:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- script.execute: reset_led
on_error:
- light.turn_on:
id: led
red: 100%
green: 0%
blue: 0%
brightness: 100%
effect: none
- delay: 2s
- script.execute: reset_led
on_client_connected:
- delay: 2s # Give the api server time to settle
- script.execute: start_wake_word
on_client_disconnected:
- script.execute: stop_wake_word
on_timer_finished:
- script.execute: stop_wake_word
- wait_until:
not:
microphone.is_capturing:
- switch.turn_on: timer_ringing
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 0%
brightness: 100%
effect: "Fast Pulse"
- wait_until:
- switch.is_off: timer_ringing
- light.turn_off: led
- switch.turn_off: timer_ringing
binary_sensor:
# button does the following:
# short click - stop a timer
# if no timer then restart either microwakeword or voice assistant continuous
- platform: gpio
pin:
number: GPIO39
inverted: true
name: Button
disabled_by_default: true
entity_category: diagnostic
id: echo_button
on_multi_click:
- timing:
- ON for at least 50ms
- OFF for at least 50ms
then:
- if:
condition:
switch.is_on: timer_ringing
then:
- switch.turn_off: timer_ringing
else:
- script.execute: start_wake_word
- timing:
- ON for at least 10s
then:
- button.press: factory_reset_btn
light:
- platform: esp32_rmt_led_strip
id: led
name: None
disabled_by_default: true
entity_category: config
pin: GPIO27
default_transition_length: 0s
chipset: SK6812
num_leds: 1
rgb_order: grb
effects:
- pulse:
name: "Slow Pulse"
transition_length: 250ms
update_interval: 250ms
min_brightness: 50%
max_brightness: 100%
- pulse:
name: "Fast Pulse"
transition_length: 100ms
update_interval: 100ms
min_brightness: 50%
max_brightness: 100%
script:
- id: reset_led
then:
- if:
condition:
- lambda: return id(wake_word_engine_location).state == "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 100%
green: 89%
blue: 71%
brightness: 60%
effect: none
else:
- if:
condition:
- lambda: return id(wake_word_engine_location).state != "On device";
- switch.is_on: use_listen_light
then:
- light.turn_on:
id: led
red: 0%
green: 100%
blue: 100%
brightness: 60%
effect: none
else:
- light.turn_off: led
- id: start_wake_word
then:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- micro_wake_word.start:
- if:
condition:
and:
- not:
- voice_assistant.is_running:
- lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- id: stop_wake_word
then:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "In Home Assistant";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- if:
condition:
lambda: return id(wake_word_engine_location).state == "On device";
then:
- micro_wake_word.stop:
switch:
- platform: template
name: Use listen light
id: use_listen_light
optimistic: true
restore_mode: RESTORE_DEFAULT_ON
entity_category: config
on_turn_on:
- script.execute: reset_led
on_turn_off:
- script.execute: reset_led
- platform: template
id: timer_ringing
optimistic: true
restore_mode: ALWAYS_OFF
on_turn_off:
# Turn off the repeat mode and disable the pause between playlist items
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_OFF)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 0);
# Stop playing the alarm
- media_player.stop:
announcement: true
on_turn_on:
# Turn on the repeat mode and pause for 1000 ms between playlist items/repeats
- lambda: |-
id(echo_media_player)
->make_call()
.set_command(media_player::MediaPlayerCommand::MEDIA_PLAYER_COMMAND_REPEAT_ONE)
.set_announcement(true)
.perform();
id(echo_media_player)->set_playlist_delay_ms(speaker::AudioPipelineType::ANNOUNCEMENT, 1000);
- media_player.speaker.play_on_device_media_file:
media_file: timer_finished_wave_file
announcement: true
- delay: 15min
- switch.turn_off: timer_ringing
select:
- platform: template
entity_category: config
name: Wake word engine location
id: wake_word_engine_location
optimistic: true
restore_value: true
options:
- In Home Assistant
- On device
initial_option: On device
on_value:
- if:
condition:
lambda: return x == "In Home Assistant";
then:
- micro_wake_word.stop:
- delay: 500ms
- lambda: id(va).set_use_wake_word(true);
- voice_assistant.start_continuous:
- if:
condition:
lambda: return x == "On device";
then:
- lambda: id(va).set_use_wake_word(false);
- voice_assistant.stop:
- delay: 500ms
- micro_wake_word.start:
micro_wake_word:
on_wake_word_detected:
- voice_assistant.start:
wake_word: !lambda return wake_word;
vad:
models:
- model: okay_nabu
- model: hey_mycroft
- model: hey_jarvis

Binary files not shown (4 files; one added image, 3.3 KiB).


@@ -11,3 +11,13 @@ sudo curl -o /etc/udev/rules.d/50-qmk.rules https://raw.githubusercontent.com/qm
sudo udevadm control --reload-rules
sudo udevadm trigger
```
## Beta BIOS Updates
```bash
# With charger attached
sudo fwupdmgr enable-remote lvfs-testing
sudo fwupdmgr refresh --force
sudo fwupdmgr get-updates
sudo fwupdmgr update
```
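
Once the beta firmware is applied, the testing remote can be turned back off (assuming you don't want to keep receiving beta releases):

```bash
sudo fwupdmgr disable-remote lvfs-testing
sudo fwupdmgr refresh --force
```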


@@ -1,6 +1,7 @@
# Home Assistant
- [Home Assistant](#home-assistant)
- [Certificates](#certificates)
- [Setup and Configuration](#setup-and-configuration)
- [Schlage Door Lock](#schlage-door-lock)
- [Philips Hue Lights](#philips-hue-lights)
@@ -13,8 +14,38 @@
- [Roku Remote](#roku-remote)
- [Flair Vent Battery](#flair-vent-battery)
- [Voice](#voice)
- [Changing the Voice of TTS](#changing-the-voice-of-tts)
- [Custom Sentences](#custom-sentences)
- [Overriding Default Sentences](#overriding-default-sentences)
- [Notifications](#notifications)
- [Unifi Cameras](#unifi-cameras)
- [Multiple Entity Triggers with Custom Names](#multiple-entity-triggers-with-custom-names)
- [Philips Hue Switches](#philips-hue-switches)
- [Datetimes](#datetimes)
- [LG TV Switch](#lg-tv-switch)
## Certificates
Note: self-signed certs won't work in the Home Assistant Android app.
```bash
# Generate the key/cert
# Note, 36159 days == 99 years
openssl req \
  -sha256 \
  -addext "subjectAltName = IP:10.2.0.230" \
  -newkey rsa:4096 \
  -nodes \
  -keyout privkey.pem \
  -x509 \
  -days 36159 \
  -out fullchain.pem
```
Then in `configuration.yaml`:
```yaml
http:
  server_port: 8123
  ssl_certificate: /ssl/fullchain.pem
  ssl_key: /ssl/privkey.pem
```
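
To confirm the SAN made it into the certificate:

```bash
openssl x509 -in fullchain.pem -noout -text | grep -A1 "Subject Alternative Name"
```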
## Setup and Configuration
@@ -400,10 +431,47 @@ Flair vents report low battery at 2.4v. 3v is nominal/full.
## Voice
### Changing the Voice of TTS
Select a media player -> play TTS -> select voice -> copy voice ID.
```yaml
options:
  voice: DavisNeural||chat
```
### Custom Sentences
<https://developers.home-assistant.io/docs/voice/intent-recognition/template-sentence-syntax/#sentence-templates-syntax>
### Overriding Default Sentences
1. Identify if your sentence conflicts with [Home Assistant's default
sentences](https://github.com/OHF-Voice/intents/tree/main/sentences/en)
2. Create a new file at `/config/custom_sentences/en/overrides.yaml`
3. As an example, to override the `HassGetWeather` sentence:
1. Copy the contents of `weather_HassGetWeather.yaml` into `overrides.yaml`
2. Rename `HassGetWeather` to `HassGetWeather_Custom`
3. Delete the required context `weather`
4. Now in `configuration.yaml`, under a section called `intent_script`, add the following
```yaml
HassGetWeather_Custom:
  speech:
    text: >-
      It's {{ int(states("sensor.backyard_weather_station_temp")) }} degrees
      with {{ states("sensor.backyard_weather_station_humidity") }}% humidity.
      I'm seeing {{ int(states("sensor.backyard_weather_station_wind_speed"))
      }}mph wind. It's rained {{
      int(states("sensor.backyard_weather_station_hourly_rain_rate")) }} inches
      in the last hour.
```
5. Restart Home Assistant
6. Navigate to Settings -> Voice Assistants -> Click the 3 dots next to
your voice assistant -> Debug -> Click the icon in the top right -> Run
text pipeline -> "What's the weather"
## Notifications
Notification Information:
@@ -413,3 +481,230 @@ Notification Information:
```yaml
Triggered by {{ trigger.entity_id }}, Date: {{ now().strftime('%Y-%m-%d') }}, Time: {{ now().strftime('%H:%M') }}
```
## Unifi Cameras
Create image/video previews of events with the following automation:
```yaml
alias: Vehicle Driveway Notification
description: Sends a notification with video upon motion detection.
triggers:
  - entity_id:
      - binary_sensor.driveway_camera_vehicle_detected
    trigger: state
    from: "on"
    to: "off"
actions:
  - data:
      message: Vehicle detected on Driveway Camera
      data:
        image: >-
          /api/unifiprotect/thumbnail/{{ config_entry_id(trigger.entity_id)
          }}/{{ trigger.from_state.attributes.event_id }}
        video: >-
          /api/unifiprotect/video/{{ config_entry_id(trigger.entity_id) }}/{{
          trigger.from_state.attributes.event_id }}
    action: notify.notify
mode: single
max_exceeded: silent
```
## Multiple Entity Triggers with Custom Names
You can set an "id" for a trigger that can be used as a human readable name.
```yaml
alias: Notify when a Door Opened
description: ""
triggers:
  - trigger: state
    entity_id:
      - binary_sensor.my_front_door
    from: "off"
    to: "on"
    id: Front Door
  - trigger: state
    entity_id:
      - binary_sensor.my_back_door
    from: "off"
    to: "on"
    id: Back Door
  - trigger: state
    entity_id:
      - binary_sensor.super_secret_door
    from: "off"
    to: "on"
    id: Trap Door
conditions: []
actions:
  - action: notify.notify
    metadata: {}
    data:
      message: "{{ trigger.id }} Opened"
mode: single
```
## Philips Hue Switches
Philips Hue Switches don't expose entities, but rather trigger "zha_event" events.
To see events fired by these devices: Developer tools -> Events -> Listen to events `zha_event`
You can use this in automations like so:
```yaml
alias: Some Switch
description: ""
triggers:
  - device_id: bb54b111ec77fb7d5356bb600789098f
    domain: zha
    type: remote_button_short_press
    subtype: turn_on
    trigger: device
    id: "on"
  - device_id: bb54b111ec77fb7d5356bb600789098f
    domain: zha
    type: remote_button_long_press
    subtype: turn_on
    trigger: device
    id: on-con
conditions: []
actions:
  - action: scene.turn_on
    metadata: {}
    data: {}
    target:
      entity_id: scene.some_scene
mode: single
```
## Datetimes
Stolen from Reddit
```yaml
## Set placeholder templates for reference in this template
## 'dt' substitutes 'now()'
## eg. if currently 5 March 2024 at 09:08:07 (AM)
eg_now = {% set eg_now = "2024-03-05 09:08:07.123456+00:00" %}{{ eg_now }}
dt = {% set dt = eg_now | as_datetime %}{{ dt }}
ts = {% set ts = eg_now | as_timestamp %}{{ ts }}
## Basic Time & Date Functions
time_now: {{ now() }}
time_local: {{ now() | as_local }}
time_timestamp: {{ now() | as_timestamp }}
## Time Conversions
seconds_per_min : {% set spm = 60 | int %}{{ spm }}
seconds_per_hour: {% set sph = ( spm * 60 ) | int %}{{ sph }}
seconds_per_day : {% set spd = 86400 | int %}{{ spd }}
seconds_per_week: {% set spw = ( spd * 7 ) | int %}{{ spw }}
minutes_per_day : {% set mpd = ( spd / 60 ) | int %}{{ mpd }}
minutes_per_week: {% set mpw = ( mpd * 7 ) | int %}{{ mpw }}
hours_per_week : {% set hpw = ( 24 * 7 ) | int %}{{ hpw }}
## Time Calculations
## with DATETIME use timedelta:
* CURRENT TIME : {{ dt }}
+ 1 YEAR : {{ dt + timedelta(days=365) }}
- 1 DAY (24H) : {{ dt - timedelta(days=1) }}
+ 3 DAYS (72H) : {{ dt + timedelta(days=3) }}
- 3 HOURS : {{ dt - timedelta(hours=3) }}
+ 1 HR 26 MIN : {{ dt + timedelta(hours=1, minutes=26) }}
+ 1D 2H 3M 4S : {{ dt + timedelta(days=1, hours=2, minutes=3, seconds=4) }}
## with TIMESTAMP use maths and then convert:
## Referencing earlier calculations for ease
* TIMESTAMP : {{ ts }}
* CURRENT TIME : {{ ts | as_datetime }}
+ 1 YEAR : {{ ( ts + (spd * 365) ) | as_datetime }}
- 1 DAY (24H) : {{ ( ts - spd ) | as_datetime }}
+ 3 DAYS (72H) : {{ ( ts + (spd * 3) ) | as_datetime }}
- 3 HOURS : {{ ( ts - (sph * 3) ) | as_datetime }}
+ 1 HR 26 MIN : {{ ( ts + sph + (spm * 26) ) | as_datetime }}
+ 1D 2H 3M 4S : {{ ( ts + spd + (sph * 2) + (spm * 3) + 4 ) | as_datetime }}
## Adjusting Time & Date For Calculations
Start Of Today: {% set start_today = dt.replace(hour=0, minute=0, second=0, microsecond=0) %}{{ start_today }}
End Of Today : {% set start_tomorrow = start_today + timedelta(days=1) %}{{ start_tomorrow }}
## Use Relative Time For DATETIME in the PAST
relative_time: {{ relative_time( start_today ) }} ago
## For time in the FUTURE you can use:
{% set current_time = dt %}{% set future_time = as_local(dt) %}{% set time_distance = future_time - current_time %}
relative_future: In {{ relative_time(current_time - time_distance) }}
## Use Time Templates combined with History Stats Sensor:
sensor:
  - platform: history_stats
    name: Lamp ON today
    entity_id: light.my_lamp
    state: "on"
```
Stolen from <https://www.fabriziomusacchio.com/blog/2021-08-15-strftime_Cheat_Sheet/>
| Format | Example | Description |
| ------ | ------------------------ | -------------------------------------------------------------------------------------------------- |
| %c | Thu Jan 28 12:32:01 2014 | locales appropriate date and time representation |
| %D | 23/05/12 | formats the date |
| %F | 2002-01-30 | date in ISO 8601 format YYYY-MM-DD |
| %x | 02/10/11 | locales appropriate date representation |
| %X | 14:22:01 | locales appropriate time representation |
| %r | 3:44:12 AM | 12-hour time |
| %R | 15:21 | 24-hour time HH:MM |
| %T | 15:21:59 | time in ISO 8601 format HH:MM:SS |
| %A | Monday | full weekday name |
| %a | Mon | abbreviated weekday name |
| %w | 0-6 | day of the week with Sunday as 0 |
| %d | 01-31 | day of the month (with a leading zero) |
| %e | 1-31 | day of the month (without a leading zero) |
| %B | April | full month name |
| %b | Apr | abbreviated month name |
| %m | 01-12 | month of the year (with a leading zero) |
| %-m | 1-12 | month of the year (without a leading zero) |
| %Y | 2003 | year |
| %y | 00-99 | year without a century (last two digits, with a leading zero) |
| %-y | 0-99 | year without a century (last two digits, without a leading zero) |
| %H | 00-23 | hour of the day, 24-hour time (with a leading zero) |
| %k | 0-23 | hour of the day, 24-hour time (without a leading zero) |
| %I | 01-11 | hour of the day, 12-hour time (with a leading zero) |
| %-I | 1-11 | hour of the day, 12-hour time (without a leading zero) |
| %P | am, pm | am or pm designation |
| %p | AM, PM | AM or PM designation |
| %M | 00-59 | minute of the hour (with a leading zero) |
| %-M | 0-59 | minute of the hour (without a leading zero) |
| %S | 00-60 | second of the minute (with a leading zero) |
| %-S | 0-60 | second of the minute (without a leading zero) |
| %f | 000000-999999 | microsecond of the second (with a leading zero) |
| %Z | UTC | timezone name or abbreviation |
| %z | +0000 | UTC offset in the form +HHMM or -HHMM |
| %s     |                          | number of seconds since 1970-01-01 00:00:00 UTC                                                      |
| %% | | % sign |
| %j     | 001-366                  | day of the year (with leading zeros)                                                                 |
| %U | 00-53 | week number with the first Sunday as the first day of week one |
| %W | 00-53 | week number of the current year, starting with the first Monday as the first day of the first week |
| %V | 01-53 | week number in ISO 8601 format |
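The same format codes work with GNU `date`, which is handy for previewing a format string before dropping it into a template (a quick sketch; `date` supports most, but not all, of the codes above):
```bash
# Preview format strings from the table above
date '+%F %T'       # e.g. 2025-11-26 13:54:01
date '+%A, %B %-d'  # e.g. Wednesday, November 26
```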
## LG TV Switch
```yaml
- platform: wake_on_lan
mac: b4:b2:91:8e:ce:20
name: loft_lg_tv_wol
turn_off:
service: media_player.turn_off
target:
device_id: "{{device_id('media_player.loft_lg_tv')}}"
- platform: wake_on_lan
mac: 60:8d:26:2c:4d:45
name: living_room_lg_tv_wol
turn_off:
service: media_player.turn_off
target:
device_id: "{{device_id('media_player.living_room_lg_tv')}}"
```

View File

@@ -1,5 +1,47 @@
# Shelly Devices
- [Shelly Devices](#shelly-devices)
- [1PM Mini Gen4](#1pm-mini-gen4)
- [Setup 1PM Mini Gen4](#setup-1pm-mini-gen4)
- [Install 1PM Mini Gen4](#install-1pm-mini-gen4)
- [Shelly Plug US](#shelly-plug-us)
- [Shelly BLU Motion](#shelly-blu-motion)
- [Shelly BLU Door/Window](#shelly-blu-doorwindow)
- [Reset](#reset)
- [Shelly Flood](#shelly-flood)
## 1PM Mini Gen4
### Setup 1PM Mini Gen4
1. Cut 1 white and 3 black pieces of 14 gauge wire to 3" long.
2. Strip 1/4" from one side of each wire.
3. Strip 1/2" from the other side of each wire.
4. Connect the 1/4" side to the shelly. Tighten the screws until you can't turn them.
5. Push line and neutral into a standard outlet. The wider receptacle is neutral.
6. Press and hold the button for 10 seconds to factory reset. Light will flash on/off every 1/4 second.
7. Press and hold the button for 5 seconds to turn on AP mode. Light will flash on/off every 1/2 second.
8. Connect to shelly network.
9. Navigate to <http://192.168.33.1>.
10. Connect to wifi. The light should turn solid.
11. Update firmware.
12. Set a password for AP mode.
13. Turn off the AP.
14. In Unifi: Name the device, give it a fixed IP, set the icon.
15. Navigate to the Shelly website via its IP address.
16. Set a password for http access: Settings -> Authentication.
17. Name the device: Settings -> Device name.
18. Set Restore last known state of output/relay: Home -> Output -> Input/Output Settings.
19. Enable Zigbee: Zigbee -> Enable.
20. Connect Shelly to Home Assistant via Zigbee.
21. Change switch type: Click on switch control -> Settings -> Show as.
### Install 1PM Mini Gen4
1. Cut 1 3" white wire for neutral bridge.
2. Cut 2 3" black wires for line bridge and light switch input.
3. Prepare 4 14 gauge wire connectors.
## Shelly Plug US
1. Connect to WiFi
@@ -11,8 +53,36 @@
7. Enable Bluetooth Gateway
8. Update Firmware
## Shelly BLU Motion
1. Download and install the Shelly Debug app
2. Follow the instructions in the app to connect the device
3. Update the firmware
4. Enable encryption (generate a 6 digit code)
5. "Read" from the device and copy the encryption key for home assistant
## Shelly BLU Door/Window
1. Download and install the Shelly Debug app
2. Follow the instructions in the app to connect the device
3. Update the firmware
4. Create a new "login" in Bitwarden called "Shelly BLU DW " + name of device
1. Password will be the encryption key
2. Website should be the MAC address of the Shelly
5. Generate a 6 digit code, send it to your phone then throw it away
6. In the Shelly Debug app, enable encryption using the 6 digit code
7. Copy the encryption key and store it in the password field
8. Add to Home Assistant
9. Unpair from Phone
### Reset
Resetting is super finicky. You'll need to plug it in, then press and hold the power button until
the red light flashes quickly (not slowly, that's a reboot). You'll probably have to do it multiple
times because they seem to reboot halfway through the reset process.
## Shelly Flood
1. In the web interface, ensure "CoIoT" is enabled and pointing to `<home assistant ip>:5683`.
Allow 5683/udp from the Shelly Flood to Home Assistant (see the firewalld sketch below). If you
don't do this, Shelly Flood will not report its status correctly!
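A minimal firewalld sketch for the Home Assistant host, assuming the Shelly Flood has a fixed IP of 10.2.0.50 and you want a dedicated zone (adjust the source and zone to your network):
```bash
# Create a zone for the Shelly and allow CoIoT traffic from it
sudo firewall-cmd --permanent --new-zone=shelly
sudo firewall-cmd --permanent --zone=shelly --add-source=10.2.0.50/32
sudo firewall-cmd --permanent --zone=shelly --add-port=5683/udp
sudo firewall-cmd --reload
```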

View File

View File

@@ -1,7 +1,99 @@
# Yubikey
- [Yubikey](#yubikey)
- [Configuration](#configuration)
- [Software](#software)
- [GPG](#gpg)
- [Saving GPG key to card](#saving-gpg-key-to-card)
- [Using the GPG key on a Yubikey](#using-the-gpg-key-on-a-yubikey)
- [Factory Reset](#factory-reset)
## Configuration
1. You will likely need the [udev
rules](https://support.yubico.com/hc/en-us/articles/360013708900-Using-Your-YubiKey-with-Linux)
to use the AppImage configuration tool on linux even if your udev version is above 244.
## Software
The [Yubikey Manager](https://www.yubico.com/support/download/yubikey-manager/) is deprecated.
Use the [Yubikey Authenticator](https://www.yubico.com/products/yubico-authenticator/) for a GUI.
## GPG
### Saving GPG key to card
<https://support.yubico.com/hc/en-us/articles/360013790259-Using-Your-YubiKey-with-OpenPGP>
On Fedora you'll need to add the following polkit rules to access your smart card.
```bash
export MY_USER=ducoterra
cat <<EOF > /etc/polkit-1/rules.d/10-pcsc-custom.rules
polkit.addRule(function(action, subject) {
if (action.id == "org.debian.pcsc-lite.access_pcsc" &&
subject.user == "${MY_USER}") {
return polkit.Result.YES;
}
});
polkit.addRule(function(action, subject) {
if (action.id == "org.debian.pcsc-lite.access_card" &&
action.lookup("reader") == 'Yubico YubiKey OTP+FIDO+CCID 00 00' &&
subject.user == "${MY_USER}") {
return polkit.Result.YES;
}
});
EOF
```
Now you can add your key to your card.
```bash
gpg --edit-key 1234ABC
# Save both the signature and authentication keys: select each subkey
# first (e.g. "key 1", then "key 2") and run keytocard for each
> keytocard
# Do not save or your key will be deleted locally
> quit
```
Check the keys on the yubikey with
```bash
gpg --card-status
```
Once your keys have been loaded, change the pin.
```bash
gpg --change-pin
```
### Using the GPG key on a Yubikey
<https://github.com/drduh/YubiKey-Guide?tab=readme-ov-file#notes>
```bash
export GPG_EMAIL='myemail@example.com'
# Import the public key. Without this the key won't show up.
gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys ${GPG_EMAIL}
# Trust the key
gpg --quick-set-ownertrust ${GPG_EMAIL} full
# Yubikey should now show up
gpg --list-secret-keys
```
### Factory Reset
```bash
gpg --edit-card
> admin
> factory-reset
```

View File

@@ -1,3 +1,13 @@
# Kubernetes
See [k3s](/active/systemd_k3s/k3s.md)
## CLI Tools
kubectl: <https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/>
helm: <https://helm.sh/docs/intro/install/>
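A quick install sketch for linux amd64, following the linked docs:
```bash
# kubectl
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# helm
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```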
## Install a Kubernetes Server
For k3s, see [k3s](/active/systemd_k3s/k3s.md)
For k0s, see [k0s](/active/systemd_k0s/k0s.md)

View File

@@ -0,0 +1,11 @@
service:
http:
type: LoadBalancer
externalTrafficPolicy: Cluster
annotations:
metallb.io/allow-shared-ip: gitea
ssh:
type: LoadBalancer
externalTrafficPolicy: Cluster
annotations:
metallb.io/allow-shared-ip: gitea

View File

@@ -21,14 +21,10 @@ ingress:
persistence:
enabled: true
create: true
storageClass: zfs-iscsi-enc0
claimName: data-gitea-staging-0
annotations:
"helm.sh/resource-policy": keep
global:
storageClass: zfs-iscsi-enc1
postgresql:
enabled: true
image:
@@ -36,7 +32,6 @@ postgresql:
primary:
persistence:
enabled: true
storageClass: zfs-iscsi-enc1
annotations:
"helm.sh/resource-policy": keep

View File

@@ -1,6 +1,7 @@
# Gitea
- [Gitea](#gitea)
- [Demo](#demo)
- [Staging](#staging)
- [Install](#install)
- [Backup and Restore](#backup-and-restore)
@@ -14,6 +15,17 @@ they decide to change things. This is the first chart (besides ingress-nginx) wh
we need to pay attention to the MetalLB annotation. This has been set in the values.yaml
file.
## Demo
```bash
helm upgrade --install \
gitea \
gitea-charts/gitea \
--values active/kubernetes_gitea/gitea-demo-values.yaml \
--namespace gitea \
--create-namespace
```
## Staging
There is a `gitea-staging.yaml` file with staging values. This should be installed in

View File

@@ -10,8 +10,14 @@
```bash
# Download the updated template from github
kubectl kustomize "github.com/rancher/local-path-provisioner/deploy?ref=v0.0.31" > active/kubernetes_local-path-provisioner/local-path-storage.yaml
kubectl kustomize "github.com/rancher/local-path-provisioner/deploy?ref=v0.0.32" > active/kubernetes_local-path-provisioner/local-path-storage.yaml
# Apply customizations (ssd/hdd storage, read write many support)
kubectl kustomize active/kubernetes_local-path-provisioner | kubectl apply -f -
```
Mark the class as default
```bash
kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```

View File

@@ -176,7 +176,7 @@ spec:
fieldPath: metadata.namespace
- name: CONFIG_MOUNT_PATH
value: /etc/config/
image: rancher/local-path-provisioner:v0.0.31
image: rancher/local-path-provisioner:v0.0.32
imagePullPolicy: IfNotPresent
name: local-path-provisioner
volumeMounts:

View File

@@ -5,6 +5,15 @@ below installs nimcraft. For each installation you'll want to create your own va
with a new port. The server-downloader is called "minecraft_get_server" and is available on
[Github](https://github.com/ducoterra/minecraft_get_server).
After installing, you can run admin commands (like whitelisting players) by
attaching to the container:
```bash
kubectl attach -it <pod>
> /whitelist add ducoterra
```
## Testing
```bash

View File

@@ -56,10 +56,10 @@ spec:
value: "1"
resources:
requests:
memory: {{ div .Values.max_ram 2 }}Gi
memory: "{{ div .Values.max_ram 2 }}Gi"
cpu: 1m
limits:
memory: {{ add 1 .Values.max_ram }}Gi
memory: "{{ add 1 .Values.max_ram }}Gi"
cpu: {{ .Values.max_cpu | quote }}
volumes:
- name: data

View File

@@ -5,7 +5,6 @@ metadata:
annotations:
"helm.sh/resource-policy": keep
spec:
storageClassName: ssd
accessModes:
- ReadWriteOnce
resources:

View File

@@ -2,11 +2,7 @@ apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
annotations:
metallb.universe.tf/address-pool: "external"
external-dns.alpha.kubernetes.io/hostname: {{ .Release.Name }}.reeseapps.com
spec:
ipFamilies: ["IPv6"]
externalTrafficPolicy: Cluster
selector:
app: {{ .Release.Name }}

View File

@@ -11,9 +11,9 @@ instructions for building a:
- [Arch Base](#arch-base)
- [Table of Contents](#table-of-contents)
- [Installation](#installation)
- [Installing Arch](#installing-arch)
- [Preparation](#preparation)
- [Installation](#installation-1)
- [Installation](#installation)
- [Post Install](#post-install)
- [Backup (or restore)](#backup-or-restore)
- [Base Tools](#base-tools)
@@ -58,7 +58,7 @@ instructions for building a:
- [VLAN Setup](#vlan-setup)
- [Date and Time](#date-and-time)
## Installation
## Installing Arch
### Preparation
@@ -70,10 +70,15 @@ Follow most of the instructions here: <https://wiki.archlinux.org/title/Installa
```bash
gpg --auto-key-locate clear,wkd -v --locate-external-key pierre@archlinux.org
gpg --keyserver-options auto-key-retrieve --verify archlinux-...
gpg --verify signature_file.sig archlinux.iso
```
3. Create a bootable ISO <https://wiki.archlinux.org/title/USB_flash_installation_medium>
```bash
dd bs=4M if=path/to/archlinux-version-x86_64.iso of=/dev/disk/by-id/usb-My_flash_drive conv=fsync oflag=direct status=progress
```
1. If you are booting into a VM, create an ISO with installation files so you don't have to
copy-paste:

View File

@@ -3,7 +3,7 @@
- [Fedora Kinoite](#fedora-kinoite)
- [TPM2 Luks Decryption](#tpm2-luks-decryption)
- [Podman](#podman)
- [Autostarting services with quadlets](#autostarting-services-with-quadlets)
- [Docker Compose and Docker Buildkit with Rootless Podman](#docker-compose-and-docker-buildkit-with-rootless-podman)
- [rpm-ostree](#rpm-ostree)
- [Git, Vim, etc](#git-vim-etc)
- [Libvirt, Qemu, KVM](#libvirt-qemu-kvm)
@@ -71,6 +71,19 @@ export REGISTRY_AUTH_FILE=$HOME/.podman-auth.json
Source that and then run `podman login` to create the file.
### Docker Compose and Docker Buildkit with Rootless Podman
Allows you to use podman with full docker-compose compatibility.
<https://emersion.fr/blog/2025/using-podman-compose-and-buildkit/>
```bash
rpm-ostree install docker-compose docker-buildx
reboot
systemctl --user enable --now podman.socket
docker context create podman --docker host=unix://$XDG_RUNTIME_DIR/podman/podman.sock
docker context use podman
```
### Autostarting services with quadlets
If you want to run something as your user at boot (like a systemd process, think ollama) you can

View File

@@ -2,23 +2,28 @@
- [Fedora Server](#fedora-server)
- [Installation](#installation)
- [Power Profiles with Tuned](#power-profiles-with-tuned)
- [Setup SSH](#setup-ssh)
- [DNF](#dnf)
- [Fail2Ban](#fail2ban)
- [BTRFS Parent Volumes](#btrfs-parent-volumes)
- [BTRFS Snapshots](#btrfs-snapshots)
- [Snapper Installation](#snapper-installation)
- [Snapper Cleanup](#snapper-cleanup)
- [BTRFS Maintenance](#btrfs-maintenance)
- [TPM2 Luks Decryption](#tpm2-luks-decryption)
- [Change your password](#change-your-password)
- [Automatic Updates](#automatic-updates)
- [Monitoring](#monitoring)
- [Glances](#glances)
- [Disk Usage](#disk-usage)
- [Disk Wear](#disk-wear)
- [Common Storage Mounts](#common-storage-mounts)
- [Network Bridge](#network-bridge)
- [Virtualization](#virtualization)
- [Troubleshooting](#troubleshooting)
- [Virtualization Troubleshooting](#virtualization-troubleshooting)
- [QEMU Images](#qemu-images)
- [Shared directory with VM Guest](#shared-directory-with-vm-guest)
- [Firewalld](#firewalld)
- [Backups](#backups)
- [Connect to the ISCSI Backup Target](#connect-to-the-iscsi-backup-target)
@@ -28,10 +33,8 @@
- [Troubleshooting Backup ISCSI Connection](#troubleshooting-backup-iscsi-connection)
- [Quick Backup](#quick-backup)
- [Regular Backups with Borg](#regular-backups-with-borg)
- [Version Upgrades](#version-upgrades)
- [Optional Steps](#optional-steps)
- [Docker with Podman as Runtime](#docker-with-podman-as-runtime)
- [Vanilla Docker](#vanilla-docker)
- [Extra Software](#extra-software)
- [Disable Swap](#disable-swap)
- [Disable Selinux](#disable-selinux)
- [Downgrading Kernel](#downgrading-kernel)
@@ -40,7 +43,7 @@
- [LVM Thin Provisioning](#lvm-thin-provisioning)
- [Set eui64 on network interface](#set-eui64-on-network-interface)
- [Install and Enable Cockpit](#install-and-enable-cockpit)
- [Troubleshooting](#troubleshooting-1)
- [Troubleshooting](#troubleshooting)
- [Cockpit Terminal Unusable or Weird Colors](#cockpit-terminal-unusable-or-weird-colors)
- [Chroot into a mounted disk](#chroot-into-a-mounted-disk)
- [Resize Last Partition to Fill Available Space](#resize-last-partition-to-fill-available-space)
@@ -72,6 +75,12 @@ and the operator will store information about each server.
5. Take note of the ipv4 and ipv6 address. Update any DNS records at this time.
6. Install and reboot
## Power Profiles with Tuned
1. `dnf install tuned`
2. `systemctl enable --now tuned`
3. `tuned-adm profile virtual-host` (verify with the sketch below)
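Verify the profile took effect:
```bash
tuned-adm active
```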
## Setup SSH
See [README](/README.md#ssh-setup)
@@ -153,6 +162,8 @@ mount -a --mkdir
<http://snapper.io/manpages/snapper-configs.html>
### Snapper Installation
We'll be using snapper, a tool for automating and controlling snapshot behavior.
```bash
@@ -172,13 +183,19 @@ systemctl enable --now snapper-timeline.timer
systemctl enable --now snapper-cleanup.timer
# Enable snapshots on boot
systemctl enable --now snapper-boot.timer
```
### Snapper Cleanup
```bash
# List snapshots
snapper -c root list
# Create snapshot manually
snapper -c root create --description "test snapshot"
# Delete first snapshot
snapper -c root delete 1
# Delete snapshots between 655-857
snapper -c root delete 655-857
```
Note - you probably don't want to keep yearly snapshots.
@@ -187,8 +204,14 @@ Edit `/etc/snapper/configs/root` and change `TIMELINE_LIMIT_YEARLY=` to `0`.
## BTRFS Maintenance
```bash
# Start a scrub in the foreground (-B) at /
btrfs scrub start -B /
# Start a scrub with low impact/priority at / (good for servers)
btrfs scrub start -c idle /
# Start a scrub in the foreground and monitor
btrfs scrub start -c idle -B -d /
# Check for errors
dmesg -T | grep btrfs
```
## TPM2 Luks Decryption
@@ -281,9 +304,30 @@ In Cockpit navigate to software updates -> automatic updates -> install -> secur
In Cockpit: Overview -> View metrics and history -> Install PCP Support -> Metrics settings -> Turn on Collect Metrics
### Glances
```bash
dnf install -y glances python3-jinja2
systemctl enable --now glances
firewall-cmd --permanent --zone=FedoraServer --add-port=61208/tcp
firewall-cmd --reload
```
### Disk Usage
```bash
# Show size of folder exclude snapshots
du --exclude .snapshots -sh .
# Show the size of each item in your current dir
for folder in *; do du --exclude .snapshots -sh "$folder"; done
# Calculate all folder sizes in current dir
alias {dudir,dud}='du -h --max-depth 1 | sort -h'
# Calculate all file sizes in current dir
alias {dufile,duf}='ls -lhSr'
```
### Disk Wear
@@ -346,7 +390,7 @@ systemctl enable --now libvirtd
Install the cockpit machines application.
### Troubleshooting
### Virtualization Troubleshooting
```bash
# Oops, I did this after I installed virtualization
@@ -372,6 +416,12 @@ qemu-img convert -f vmdk -O raw in.vmdk out.img
qemu-img convert -f qcow2 -O raw in.qcow2 out.img
```
### Shared directory with VM Guest
```bash
mount -t virtiofs [mount tag] [mount point]
```
## Firewalld
Set the default firewalld zone to `public`
@@ -384,6 +434,8 @@ Set the default firewalld zone to `public`
Firewalld will be on and blocking by default. You can check the zone and allowed ports with:
```bash
firewall-cmd --get-active-zones
firewall-cmd --get-default-zone
firewall-cmd --zone=public --list-ports
firewall-cmd --zone=public --list-services
```
@@ -395,6 +447,21 @@ firewall-cmd --permanent --zone=public --add-port=9090/tcp
firewall-cmd --reload
```
Remove cockpit with
```bash
firewall-cmd --permanent --zone=public --remove-port=9090/tcp
```
Add a custom source for a service
```bash
sudo firewall-cmd --new-zone=home --permanent
sudo firewall-cmd --zone=home --add-source=10.2.0.0/24 --permanent
sudo firewall-cmd --zone=home --add-port=10700/tcp --permanent
sudo firewall-cmd --reload
```
## Backups
Note: this assumes you've set up [an iscsi backup disk](/active/os_truenas/truenas.md#iscsi-backup-volumes)
@@ -426,7 +493,7 @@ iscsiadm -m node \
systemctl restart iscsid
# Discover targets
iscsiadm -m discovery -t st -p driveripper.reeselink.com
iscsiadm -m discovery -t st -p drivework.reeselink.com
# Login to all nodes
iscsiadm -m node -l
@@ -486,46 +553,20 @@ rsync -av --progress --exclude '.snapshots' /btrfs/yellow/root /btrfs/backup-yel
See [borg.md](/active/systemd_borg/borg.md)
## Version Upgrades
```bash
# Make sure to be fully up to date first
dnf upgrade --refresh
reboot
# Set the releasever to the version you want to upgrade to
dnf system-upgrade download --releasever=43
dnf system-upgrade reboot
```
## Optional Steps
### Docker with Podman as Runtime
Note, you'll need to ssh into the server as the user in order to start the user's systemd session.
```bash
sudo dnf install podman docker docker-compose
sudo loginctl enable-linger 1000 # Or whatever user
systemctl --user enable --now podman.socket
docker context create podman --docker host=unix://$XDG_RUNTIME_DIR/podman/podman.sock
docker context use podman
```
### Vanilla Docker
<https://docs.docker.com/engine/install/fedora/>
```bash
dnf -y install dnf-plugins-core
dnf-3 config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
systemctl enable --now docker
```
### Extra Software
```bash
# Set vim as the default editor
dnf install -y vim-default-editor --allowerasing
# Install glances for system monitoring
dnf install -y glances
# ZSH
dnf install -y zsh
chsh -s $(which zsh) && chsh -s $(which zsh) ducoterra
```
### Disable Swap
```bash
@@ -625,6 +666,8 @@ mkfs.xfs /dev/mapper/vg0-docker--data
```bash
nmcli connection modify Wired\ connection\ 1 ipv6.addr-gen-mode eui64
nmcli connection modify Wired\ connection\ 1 ipv6.ip6-privacy disabled
systemctl restart NetworkManager
```
### Install and Enable Cockpit

View File

@@ -1,7 +1,8 @@
# Apps
# Fedora Software
- [Apps](#apps)
- [Fedora Software](#fedora-software)
- [Common CLI Apps](#common-cli-apps)
- [Podman](#podman)
- [Gear Lever](#gear-lever)
- [VSCode](#vscode)
- [DNF](#dnf)
@@ -53,6 +54,12 @@
- [Ollama](#ollama)
- [UV](#uv)
- [Pipenv](#pipenv)
- [Docker](#docker)
- [Boxes](#boxes)
- [ffmpeg](#ffmpeg)
- [AMD GPU VAAPI ffmpeg Acceleration](#amd-gpu-vaapi-ffmpeg-acceleration)
- [Containers](#containers)
- [XSane](#xsane)
Flatpak installs are from Flathub unless otherwise noted.
@@ -84,8 +91,6 @@ sudo dnf install \
ansible \
# Terminal multiplexer.
tmux \
# Multimedia player with support for a wide range of codecs and file formats.
ffmpeg \
# Microsoft Windows compatibility layer.
wine \
# Archive utility similar to GNU tar, used to package files into single archive files.
@@ -109,7 +114,7 @@ sudo dnf install \
# Document conversion tool and markup language converter.
pandoc \
# Comprehensive LaTeX distribution for high-quality typesetting of documents.
texlive-latex \
texlive-latex texlive-scheme-full \
# Generate strong passwords.
pwgen \
# Reattach to running processes
@@ -124,6 +129,12 @@ sudo dnf install \
gcc
```
## Podman
```bash
dnf install -y podman
```
## Gear Lever
I would recommend you install Gear Lever to manage App Images:
@@ -198,6 +209,10 @@ flatpak install com.bitwarden.desktop
Video player (like VLC but can frame-by-frame in reverse).
```bash
# DNF
dnf install mpv
# Flatpak
flatpak install io.mpv.Mpv
```
@@ -587,6 +602,8 @@ flatpak install org.gnome.Evolution
```bash
# Virtualization
sudo dnf group install --with-optional virtualization
sudo systemctl enable --now libvirtd virtnetworkd.service
```
## NVM
@@ -613,3 +630,91 @@ For starting ollama as a service, follow the link below:
## Pipenv
<https://pipenv.pypa.io/en/latest/installation.html#installing-pipenv>
## Docker
<https://docs.docker.com/engine/install/fedora/>
```bash
dnf -y install dnf-plugins-core
dnf-3 config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
dnf install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
systemctl enable --now docker
```
Or use docker with podman with full docker-compose compatibility.
<https://emersion.fr/blog/2025/using-podman-compose-and-buildkit/>
```bash
dnf install -y docker-compose docker-buildx
systemctl --user enable --now podman.socket
docker context create podman --docker host=unix://$XDG_RUNTIME_DIR/podman/podman.sock
docker context use podman
```
## Boxes
Virtualization at its boxiest.
```bash
flatpak install org.gnome.Boxes
```
## ffmpeg
- 1080p h264 at 10M is good quality; a sample command is sketched below
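A minimal CPU-encode sketch of that setting (filenames are placeholders):
```bash
# Scale to 1080p (width kept divisible by 2) and encode h264 at 10M
ffmpeg -i in.mp4 -vf scale=-2:1080 -c:v libx264 -b:v 10M -c:a copy out.mp4
```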
### AMD GPU VAAPI ffmpeg Acceleration
1. Enable [RPM Fusion](https://docs.fedoraproject.org/en-US/quick-docs/rpmfusion-setup/)
2. Install [ffmpeg non-free](https://rpmfusion.org/Howto/Multimedia)
```bash
# Enable RPM Fusion
sudo dnf install \
https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm
sudo dnf install \
https://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-$(rpm -E %fedora).noarch.rpm
# Install ffmpeg non-free
sudo dnf swap ffmpeg-free ffmpeg --allowerasing
sudo dnf update @multimedia --setopt="install_weak_deps=False" --exclude=PackageKit-gstreamer-plugin
sudo dnf swap mesa-va-drivers mesa-va-drivers-freeworld
sudo dnf swap mesa-vdpau-drivers mesa-vdpau-drivers-freeworld
sudo dnf swap mesa-va-drivers.i686 mesa-va-drivers-freeworld.i686
sudo dnf swap mesa-vdpau-drivers.i686 mesa-vdpau-drivers-freeworld.i686
```
ffmpeg with vaapi
```bash
ffmpeg \
-hwaccel vaapi \
-hwaccel_output_format vaapi \
-i VID_20250804_120159.mp4 \
-vf 'format=nv12,hwupload,scale_vaapi=1080:1920' \
-c:v h264_vaapi \
-c:a copy \
-qp 18 \
VID_20250804_120159_1.mp4
```
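To confirm VAAPI is actually available before encoding, `vainfo` should list an H264 encode entrypoint (a sketch; the package name is from Fedora's libva-utils):
```bash
sudo dnf install -y libva-utils
vainfo | grep -i h264
```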
## Containers
In order to enter a shell with systemd-user access via `machinectl`, install systemd-container
```bash
dnf install -y systemd-container
```
Then you can run `machinectl shell myuser@` to enter a shell which can execute `systemctl --user` commands.
## XSane
Scan stuff
```bash
dnf install xsane
```

View File

@@ -19,6 +19,7 @@
- [Display](#display)
- [Scripted Display Modes](#scripted-display-modes)
- [Fixing generic Wayland icons on task alt tab](#fixing-generic-wayland-icons-on-task-alt-tab)
- [Tuned Power Profiles](#tuned-power-profiles)
## Framework 16 Fixes
@@ -103,14 +104,21 @@ toolbox enter
Set the default firewall to `drop`
```bash
firewall-cmd --set-default-zone=drop
firewall-cmd --reload
sudo firewall-cmd --set-default-zone=drop
sudo firewall-cmd --reload
```
Allow KDE Connect via 1714-1764 tcp/udp
```bash
firewall-cmd --add-port=1714-1764/udp --add-port=1714-1764/tcp --permanent
# Set source address to allow connections
sudo firewall-cmd \
--zone=drop \
--permanent \
--add-port=1714-1764/udp \
--add-port=1714-1764/tcp
sudo firewall-cmd --reload
```
You can check if the firewall is working via `nmap` from another machine
@@ -343,3 +351,11 @@ output.eDP-2.scale.1'
Apply the new settings and close the application if it was open. The next
time you open the application, it should show the correct icon.
## Tuned Power Profiles
Default profiles are in `/usr/lib/tuned/profiles`.
Configuration file is in `/etc/tuned/ppd.conf`.
Use the `tuned-adm` CLI to interact with tuned.
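For example, a quick sketch:
```bash
# List available profiles and show the active one
tuned-adm list
tuned-adm active
# Switch profiles
sudo tuned-adm profile powersave
```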

View File

@@ -0,0 +1,14 @@
module clamav-notifysend 1.0;
require {
type session_dbusd_tmp_t;
type antivirus_t;
type unconfined_dbusd_t;
class sock_file write;
class unix_stream_socket connectto;
}
#============= antivirus_t ==============
allow antivirus_t session_dbusd_tmp_t:sock_file write;
allow antivirus_t unconfined_dbusd_t:unix_stream_socket connectto;

View File

@@ -0,0 +1,29 @@
module clamav-sudo 1.0;
require {
type antivirus_t;
type sudo_exec_t;
type systemd_logind_var_run_t;
type pidfs_t;
type chkpwd_exec_t;
type systemd_logind_t;
class file { execute execute_no_trans map };
class netlink_audit_socket { create nlmsg_relay read write };
class capability { audit_write sys_resource };
class process { setrlimit setsched };
class sock_file write;
class unix_stream_socket connectto;
class filesystem getattr;
}
#============= antivirus_t ==============
allow antivirus_t chkpwd_exec_t:file { execute execute_no_trans };
allow antivirus_t pidfs_t:filesystem getattr;
allow antivirus_t self:capability { audit_write sys_resource };
allow antivirus_t self:netlink_audit_socket { create nlmsg_relay write };
allow antivirus_t self:netlink_audit_socket read;
allow antivirus_t self:process { setrlimit setsched };
allow antivirus_t sudo_exec_t:file map;
allow antivirus_t systemd_logind_t:unix_stream_socket connectto;
allow antivirus_t systemd_logind_var_run_t:sock_file write;

View File

@@ -0,0 +1,23 @@
module clamav-unixchkpwd 1.0;
require {
type chkpwd_t;
type user_devpts_t;
type antivirus_t;
type shadow_t;
type init_t;
class chr_file { read write };
class file { getattr open read };
class process siginh;
}
#============= antivirus_t ==============
allow antivirus_t shadow_t:file { open read };
allow antivirus_t shadow_t:file getattr;
#============= chkpwd_t ==============
allow chkpwd_t user_devpts_t:chr_file { read write };
#============= init_t ==============
allow init_t chkpwd_t:process siginh;

View File

@@ -0,0 +1,16 @@
module my-rpcvirtstorage 1.0;
require {
type user_home_t;
type virtstoraged_t;
type qemu_var_run_t;
class dir setattr;
class capability fowner;
class file setattr;
}
#============= virtstoraged_t ==============
allow virtstoraged_t qemu_var_run_t:file setattr;
allow virtstoraged_t self:capability fowner;
allow virtstoraged_t user_home_t:dir setattr;

View File

@@ -0,0 +1,703 @@
From d4022a63f388f9cd0f537a4eb371e111c44b9c9c Mon Sep 17 00:00:00 2001
From: Georgij Krajnyukov <nrk63@yandex.ru>
Date: Wed, 19 Feb 2025 11:59:14 +0300
Subject: [PATCH 1/4] P11_CHILD: Invert if statement to reduce code nesting
Reviewed-by: Alexey Tikhonov <atikhono@redhat.com>
Reviewed-by: Sumit Bose <sbose@redhat.com>
---
src/p11_child/p11_child_openssl.c | 33 +++++++++++++++----------------
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/src/p11_child/p11_child_openssl.c b/src/p11_child/p11_child_openssl.c
index 45a4930ba..8aa73c035 100644
--- a/src/p11_child/p11_child_openssl.c
+++ b/src/p11_child/p11_child_openssl.c
@@ -1928,26 +1928,25 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
if (slot_id == (CK_SLOT_ID)-1) {
DEBUG(SSSDBG_TRACE_ALL, "Token not present.\n");
- if (p11_ctx->wait_for_card) {
- /* After obtaining the module's slot list (in the loop above), this
- * call is needed to let any changes in slots take effect. */
- rv = module->C_GetSlotList(CK_FALSE, NULL, &num_slots);
- if (rv != CKR_OK) {
- DEBUG(SSSDBG_OP_FAILURE, "C_GetSlotList failed [%lu][%s].\n",
- rv, p11_kit_strerror(rv));
- ret = EIO;
- goto done;
- }
-
- ret = wait_for_card(module, &slot_id, &info, &token_info, uri);
- if (ret != EOK) {
- DEBUG(SSSDBG_OP_FAILURE, "wait_for_card failed.\n");
- goto done;
- }
- } else {
+ if (!p11_ctx->wait_for_card) {
ret = EIO;
goto done;
}
+ /* After obtaining the module's slot list (in the loop above), this
+ * call is needed to let any changes in slots take effect. */
+ rv = module->C_GetSlotList(CK_FALSE, NULL, &num_slots);
+ if (rv != CKR_OK) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_GetSlotList failed [%lu][%s].\n",
+ rv, p11_kit_strerror(rv));
+ ret = EIO;
+ goto done;
+ }
+
+ ret = wait_for_card(module, &slot_id, &info, &token_info, uri);
+ if (ret != EOK) {
+ DEBUG(SSSDBG_OP_FAILURE, "wait_for_card failed.\n");
+ goto done;
+ }
}
module_id = c;
--
2.51.0
From 06ee7f46937b40da85628a03c59865fffea48e90 Mon Sep 17 00:00:00 2001
From: Georgij Krajnyukov <nrk63@yandex.ru>
Date: Tue, 4 Mar 2025 12:28:33 +0300
Subject: [PATCH 2/4] P11_CHILD: Implement passing const args to get_pkcs11_uri
Reviewed-by: Alexey Tikhonov <atikhono@redhat.com>
Reviewed-by: Sumit Bose <sbose@redhat.com>
---
src/p11_child/p11_child_openssl.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/p11_child/p11_child_openssl.c b/src/p11_child/p11_child_openssl.c
index 8aa73c035..30782d955 100644
--- a/src/p11_child/p11_child_openssl.c
+++ b/src/p11_child/p11_child_openssl.c
@@ -505,9 +505,9 @@ done:
return ret;
}
-static char *get_pkcs11_uri(TALLOC_CTX *mem_ctx, CK_INFO *module_info,
- CK_SLOT_INFO *slot_info, CK_SLOT_ID slot_id,
- CK_TOKEN_INFO *token_info, CK_ATTRIBUTE *label,
+static char *get_pkcs11_uri(TALLOC_CTX *mem_ctx, const CK_INFO *module_info,
+ const CK_SLOT_INFO *slot_info, CK_SLOT_ID slot_id,
+ const CK_TOKEN_INFO *token_info, CK_ATTRIBUTE *label,
CK_ATTRIBUTE *id)
{
P11KitUri *uri;
--
2.51.0
From 54d3110be5146ec1d6575389bc60ad7585493984 Mon Sep 17 00:00:00 2001
From: Georgij Krajnyukov <nrk63@yandex.ru>
Date: Wed, 19 Feb 2025 12:22:30 +0300
Subject: [PATCH 3/4] P11_CHILD: Extract slot processing into separate function
Reviewed-by: Alexey Tikhonov <atikhono@redhat.com>
Reviewed-by: Sumit Bose <sbose@redhat.com>
---
src/p11_child/p11_child_openssl.c | 399 ++++++++++++++++--------------
1 file changed, 212 insertions(+), 187 deletions(-)
diff --git a/src/p11_child/p11_child_openssl.c b/src/p11_child/p11_child_openssl.c
index 30782d955..b96d19f88 100644
--- a/src/p11_child/p11_child_openssl.c
+++ b/src/p11_child/p11_child_openssl.c
@@ -1702,6 +1702,215 @@ static errno_t wait_for_card(CK_FUNCTION_LIST *module, CK_SLOT_ID *slot_id,
#define MAX_SLOTS 64
+errno_t do_slot(CK_FUNCTION_LIST *module, size_t module_id, CK_SLOT_ID slot_id,
+ const CK_SLOT_INFO *info, const CK_TOKEN_INFO *token_info,
+ const CK_INFO *module_info, TALLOC_CTX *mem_ctx,
+ struct p11_ctx *p11_ctx, enum op_mode mode, const char *pin,
+ const char *module_name_in, const char *token_name_in,
+ const char *key_id_in, const char *label_in,
+ const char *uri_str, char **_multi) {
+ int ret;
+ CK_RV rv;
+ char *module_file_name = NULL;
+ char *slot_name = NULL;
+ char *token_name = NULL;
+ CK_SESSION_HANDLE session = 0;
+ struct cert_list *cert_list = NULL;
+ struct cert_list *item = NULL;
+ struct cert_list *next_item = NULL;
+ bool pkcs11_session = false;
+ bool pkcs11_login = false;
+
+ slot_name = p11_kit_space_strdup(info->slotDescription,
+ sizeof(info->slotDescription));
+ if (slot_name == NULL) {
+ DEBUG(SSSDBG_OP_FAILURE, "p11_kit_space_strdup failed.\n");
+ ret = ENOMEM;
+ goto done;
+ }
+
+ token_name = p11_kit_space_strdup(token_info->label,
+ sizeof(token_info->label));
+ if (token_name == NULL) {
+ DEBUG(SSSDBG_OP_FAILURE, "p11_kit_space_strdup failed.\n");
+ ret = ENOMEM;
+ goto done;
+ }
+
+ module_file_name = p11_kit_module_get_filename(module);
+ if (module_file_name == NULL) {
+ DEBUG(SSSDBG_OP_FAILURE, "p11_kit_module_get_filename failed.\n");
+ ret = ENOMEM;
+ goto done;
+ }
+
+ DEBUG(SSSDBG_TRACE_ALL, "Found [%s] in slot [%s][%d] of module [%d][%s].\n",
+ token_name, slot_name, (int) slot_id, (int) module_id,
+ module_file_name);
+
+ rv = module->C_OpenSession(slot_id, CKF_SERIAL_SESSION, NULL, NULL,
+ &session);
+ if (rv != CKR_OK) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_OpenSession failed [%lu][%s].\n",
+ rv, p11_kit_strerror(rv));
+ ret = EIO;
+ goto done;
+ }
+ pkcs11_session = true;
+
+ /* login: do we need to check for Login Required? */
+ if (mode == OP_AUTH) {
+ DEBUG(SSSDBG_TRACE_ALL, "Login required.\n");
+ DEBUG(SSSDBG_TRACE_ALL, "Token flags [%lu].\n", token_info->flags);
+ if ((pin != NULL)
+ || (token_info->flags & CKF_PROTECTED_AUTHENTICATION_PATH)) {
+
+ if (token_info->flags & CKF_PROTECTED_AUTHENTICATION_PATH) {
+ DEBUG(SSSDBG_TRACE_ALL, "Protected authentication path.\n");
+ pin = NULL;
+ }
+ rv = module->C_Login(session, CKU_USER, discard_const(pin),
+ (pin != NULL) ? strlen(pin) : 0);
+ if (rv == CKR_PIN_LOCKED) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_Login failed: PIN locked\n");
+ ret = ERR_P11_PIN_LOCKED;
+ goto done;
+ }
+ else if (rv != CKR_OK) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_Login failed [%lu][%s].\n",
+ rv, p11_kit_strerror(rv));
+ ret = EIO;
+ goto done;
+ }
+ pkcs11_login = true;
+ } else {
+ DEBUG(SSSDBG_CRIT_FAILURE,
+ "Login required but no PIN available, continue.\n");
+ }
+ } else {
+ DEBUG(SSSDBG_TRACE_ALL, "Login NOT required.\n");
+ }
+
+ ret = read_certs(mem_ctx, module, session, p11_ctx, &cert_list);
+ if (ret != EOK) {
+ DEBUG(SSSDBG_OP_FAILURE, "read_certs failed.\n");
+ goto done;
+ }
+
+ DLIST_FOR_EACH_SAFE(item, next_item, cert_list) {
+ /* Check if we found the certificates we needed for authentication or
+ * the requested ones for pre-auth. For authentication all attributes
+ * except the label must be given and match. The label is optional for
+ * authentication but if given it must match as well. For pre-auth
+ * only the given ones must match. */
+ DEBUG(SSSDBG_TRACE_ALL, "%s %s %s %s %s %s %s.\n",
+ module_name_in, module_file_name, token_name_in, token_name,
+ key_id_in, label_in == NULL ? "- no label given-" : label_in,
+ item->id);
+
+ if ((mode == OP_AUTH
+ && module_name_in != NULL
+ && token_name_in != NULL
+ && key_id_in != NULL
+ && item->id != NULL
+ && strcmp(key_id_in, item->id) == 0
+ && (label_in == NULL
+ || (label_in != NULL && item->label != NULL
+ && strcmp(label_in, item->label) == 0))
+ && strcmp(token_name_in, token_name) == 0
+ && strcmp(module_name_in, module_file_name) == 0)
+ || (mode == OP_PREAUTH
+ && (module_name_in == NULL
+ || (module_name_in != NULL
+ && strcmp(module_name_in, module_file_name) == 0))
+ && (token_name_in == NULL
+ || (token_name_in != NULL
+ && strcmp(token_name_in, token_name) == 0))
+ && (key_id_in == NULL
+ || (key_id_in != NULL && item->id != NULL
+ && strcmp(key_id_in, item->id) == 0)))) {
+
+ item->uri = get_pkcs11_uri(mem_ctx, module_info, info, slot_id,
+ token_info,
+ &item->attributes[1] /* label */,
+ &item->attributes[0] /* id */);
+ DEBUG(SSSDBG_TRACE_ALL, "uri: %s.\n", item->uri);
+
+ } else {
+ DLIST_REMOVE(cert_list, item);
+ talloc_free(item);
+ }
+ }
+
+ /* TODO: check module_name_in, token_name_in, key_id_in */
+
+ if (cert_list == NULL) {
+ DEBUG(SSSDBG_TRACE_ALL, "No certificate found.\n");
+ *_multi = NULL;
+ ret = EOK;
+ goto done;
+ }
+
+ if (mode == OP_AUTH) {
+ if (cert_list->next != NULL || cert_list->prev != NULL) {
+ DEBUG(SSSDBG_FATAL_FAILURE,
+ "More than one certificate found for authentication, "
+ "aborting!\n");
+ ret = EINVAL;
+ goto done;
+ }
+
+ ret = sign_data(module, session, slot_id, cert_list);
+ if (ret != EOK) {
+ DEBUG(SSSDBG_OP_FAILURE, "sign_data failed.\n");
+ ret = EACCES;
+ goto done;
+ }
+
+ DEBUG(SSSDBG_TRACE_ALL,
+ "Certificate verified and validated.\n");
+ }
+
+ *_multi = talloc_strdup(mem_ctx, "");
+ if (*_multi == NULL) {
+ DEBUG(SSSDBG_CRIT_FAILURE, "Failed to create output string.\n");
+ ret = ENOMEM;
+ goto done;
+ }
+
+ DLIST_FOR_EACH(item, cert_list) {
+ DEBUG(SSSDBG_TRACE_ALL, "Found certificate has key id [%s].\n",
+ item->id);
+
+ *_multi = talloc_asprintf_append(*_multi, "%s\n%s\n%s\n%s\n%s\n",
+ token_name, module_file_name, item->id,
+ item->label, item->cert_b64);
+ }
+
+ ret = EOK;
+done:
+ if (module != NULL) {
+ if (pkcs11_login) {
+ rv = module->C_Logout(session);
+ if (rv != CKR_OK) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_Logout failed [%lu][%s].\n",
+ rv, p11_kit_strerror(rv));
+ }
+ }
+ if (pkcs11_session) {
+ rv = module->C_CloseSession(session);
+ if (rv != CKR_OK) {
+ DEBUG(SSSDBG_OP_FAILURE, "C_CloseSession failed [%lu][%s].\n",
+ rv, p11_kit_strerror(rv));
+ }
+ }
+ }
+ free(slot_name);
+ free(token_name);
+ free(module_file_name);
+ return ret;
+}
+
errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
enum op_mode mode, const char *pin,
const char *module_name_in, const char *token_name_in,
@@ -1724,16 +1933,7 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
CK_INFO module_info;
CK_RV rv;
size_t module_id;
- char *module_file_name = NULL;
- char *slot_name = NULL;
- char *token_name = NULL;
- CK_SESSION_HANDLE session = 0;
- struct cert_list *cert_list = NULL;
- struct cert_list *item = NULL;
- struct cert_list *next_item = NULL;
char *multi = NULL;
- bool pkcs11_session = false;
- bool pkcs11_login = false;
P11KitUri *uri = NULL;
if (uri_str != NULL) {
@@ -1950,188 +2150,13 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
}
module_id = c;
- slot_name = p11_kit_space_strdup(info.slotDescription,
- sizeof(info.slotDescription));
- if (slot_name == NULL) {
- DEBUG(SSSDBG_OP_FAILURE, "p11_kit_space_strdup failed.\n");
- ret = ENOMEM;
- goto done;
- }
-
- token_name = p11_kit_space_strdup(token_info.label,
- sizeof(token_info.label));
- if (token_name == NULL) {
- DEBUG(SSSDBG_OP_FAILURE, "p11_kit_space_strdup failed.\n");
- ret = ENOMEM;
- goto done;
- }
-
- module_file_name = p11_kit_module_get_filename(module);
- if (module_file_name == NULL) {
- DEBUG(SSSDBG_OP_FAILURE, "p11_kit_module_get_filename failed.\n");
- ret = ENOMEM;
- goto done;
- }
-
- DEBUG(SSSDBG_TRACE_ALL, "Found [%s] in slot [%s][%d] of module [%d][%s].\n",
- token_name, slot_name, (int) slot_id, (int) module_id,
- module_file_name);
-
- rv = module->C_OpenSession(slot_id, CKF_SERIAL_SESSION, NULL, NULL,
- &session);
- if (rv != CKR_OK) {
- DEBUG(SSSDBG_OP_FAILURE, "C_OpenSession failed [%lu][%s].\n",
- rv, p11_kit_strerror(rv));
- ret = EIO;
- goto done;
- }
- pkcs11_session = true;
-
- /* login: do we need to check for Login Required? */
- if (mode == OP_AUTH) {
- DEBUG(SSSDBG_TRACE_ALL, "Login required.\n");
- if (pin != NULL) {
- rv = module->C_Login(session, CKU_USER, discard_const(pin),
- strlen(pin));
- if (rv == CKR_PIN_LOCKED) {
- DEBUG(SSSDBG_OP_FAILURE, "C_Login failed: PIN locked\n");
- ret = ERR_P11_PIN_LOCKED;
- goto done;
- }
- else if (rv != CKR_OK) {
- DEBUG(SSSDBG_OP_FAILURE, "C_Login failed [%lu][%s].\n",
- rv, p11_kit_strerror(rv));
- ret = EIO;
- goto done;
- }
- pkcs11_login = true;
- } else {
- DEBUG(SSSDBG_CRIT_FAILURE,
- "Login required but no PIN available, continue.\n");
- }
- } else {
- DEBUG(SSSDBG_TRACE_ALL, "Login NOT required.\n");
- }
-
- ret = read_certs(mem_ctx, module, session, p11_ctx, &cert_list);
- if (ret != EOK) {
- DEBUG(SSSDBG_OP_FAILURE, "read_certs failed.\n");
- goto done;
- }
-
- DLIST_FOR_EACH_SAFE(item, next_item, cert_list) {
- /* Check if we found the certificates we needed for authentication or
- * the requested ones for pre-auth. For authentication all attributes
- * except the label must be given and match. The label is optional for
- * authentication but if given it must match as well. For pre-auth
- * only the given ones must match. */
- DEBUG(SSSDBG_TRACE_ALL, "%s %s %s %s %s %s %s.\n",
- module_name_in, module_file_name, token_name_in, token_name,
- key_id_in, label_in == NULL ? "- no label given-" : label_in,
- item->id);
-
- if ((mode == OP_AUTH
- && module_name_in != NULL
- && token_name_in != NULL
- && key_id_in != NULL
- && item->id != NULL
- && strcmp(key_id_in, item->id) == 0
- && (label_in == NULL
- || (label_in != NULL && item->label != NULL
- && strcmp(label_in, item->label) == 0))
- && strcmp(token_name_in, token_name) == 0
- && strcmp(module_name_in, module_file_name) == 0)
- || (mode == OP_PREAUTH
- && (module_name_in == NULL
- || (module_name_in != NULL
- && strcmp(module_name_in, module_file_name) == 0))
- && (token_name_in == NULL
- || (token_name_in != NULL
- && strcmp(token_name_in, token_name) == 0))
- && (key_id_in == NULL
- || (key_id_in != NULL && item->id != NULL
- && strcmp(key_id_in, item->id) == 0)))) {
-
- item->uri = get_pkcs11_uri(mem_ctx, &module_info, &info, slot_id,
- &token_info,
- &item->attributes[1] /* label */,
- &item->attributes[0] /* id */);
- DEBUG(SSSDBG_TRACE_ALL, "uri: %s.\n", item->uri);
-
- } else {
- DLIST_REMOVE(cert_list, item);
- talloc_free(item);
- }
- }
-
- /* TODO: check module_name_in, token_name_in, key_id_in */
-
- if (cert_list == NULL) {
- DEBUG(SSSDBG_TRACE_ALL, "No certificate found.\n");
- *_multi = NULL;
- ret = EOK;
- goto done;
- }
-
- if (mode == OP_AUTH) {
- if (cert_list->next != NULL || cert_list->prev != NULL) {
- DEBUG(SSSDBG_FATAL_FAILURE,
- "More than one certificate found for authentication, "
- "aborting!\n");
- ret = EINVAL;
- goto done;
- }
-
- ret = sign_data(module, session, slot_id, cert_list);
- if (ret != EOK) {
- DEBUG(SSSDBG_OP_FAILURE, "sign_data failed.\n");
- ret = EACCES;
- goto done;
- }
-
- DEBUG(SSSDBG_TRACE_ALL,
- "Certificate verified and validated.\n");
- }
-
- multi = talloc_strdup(mem_ctx, "");
- if (multi == NULL) {
- DEBUG(SSSDBG_CRIT_FAILURE, "Failed to create output string.\n");
- ret = ENOMEM;
- goto done;
- }
-
- DLIST_FOR_EACH(item, cert_list) {
- DEBUG(SSSDBG_TRACE_ALL, "Found certificate has key id [%s].\n",
- item->id);
-
- multi = talloc_asprintf_append(multi, "%s\n%s\n%s\n%s\n%s\n",
- token_name, module_file_name, item->id,
- item->label, item->cert_b64);
- }
-
+ ret = do_slot(module, module_id, slot_id, &info, &token_info, &module_info,
+ mem_ctx, p11_ctx, mode, pin, module_name_in, token_name_in,
+ key_id_in, label_in, uri_str, &multi);
*_multi = multi;
ret = EOK;
done:
- if (module != NULL) {
- if (pkcs11_login) {
- rv = module->C_Logout(session);
- if (rv != CKR_OK) {
- DEBUG(SSSDBG_OP_FAILURE, "C_Logout failed [%lu][%s].\n",
- rv, p11_kit_strerror(rv));
- }
- }
- if (pkcs11_session) {
- rv = module->C_CloseSession(session);
- if (rv != CKR_OK) {
- DEBUG(SSSDBG_OP_FAILURE, "C_CloseSession failed [%lu][%s].\n",
- rv, p11_kit_strerror(rv));
- }
- }
- }
- free(slot_name);
- free(token_name);
- free(module_file_name);
p11_kit_modules_finalize_and_release(modules);
p11_kit_uri_free(uri);
--
2.51.0
From 218c418e1ad34efe3922937673716c0cc657597c Mon Sep 17 00:00:00 2001
From: Georgij Krajnyukov <nrk63@yandex.ru>
Date: Fri, 14 Feb 2025 15:00:50 +0300
Subject: [PATCH 4/4] P11_CHILD: Make p11_child iterate over all slots
Resolves: https://github.com/SSSD/sssd/issues/5905
Reviewed-by: Alexey Tikhonov <atikhono@redhat.com>
Reviewed-by: Sumit Bose <sbose@redhat.com>
---
src/p11_child/p11_child_openssl.c | 74 +++++++++++++++++++++----------
1 file changed, 51 insertions(+), 23 deletions(-)
diff --git a/src/p11_child/p11_child_openssl.c b/src/p11_child/p11_child_openssl.c
index b96d19f88..154a3052b 100644
--- a/src/p11_child/p11_child_openssl.c
+++ b/src/p11_child/p11_child_openssl.c
@@ -1748,6 +1748,15 @@ errno_t do_slot(CK_FUNCTION_LIST *module, size_t module_id, CK_SLOT_ID slot_id,
token_name, slot_name, (int) slot_id, (int) module_id,
module_file_name);
+ if (mode == OP_AUTH && strcmp(token_name, token_name_in) != 0) {
+ DEBUG(SSSDBG_TRACE_ALL, "Token name [%s] does not match "
+ "token_name_in [%s]. "
+ "Skipping this token...\n",
+ token_name, token_name_in);
+ ret = EOK;
+ goto done;
+ }
+
rv = module->C_OpenSession(slot_id, CKF_SERIAL_SESSION, NULL, NULL,
&session);
if (rv != CKR_OK) {
@@ -1846,7 +1855,6 @@ errno_t do_slot(CK_FUNCTION_LIST *module, size_t module_id, CK_SLOT_ID slot_id,
if (cert_list == NULL) {
DEBUG(SSSDBG_TRACE_ALL, "No certificate found.\n");
- *_multi = NULL;
ret = EOK;
goto done;
}
@@ -1871,13 +1879,6 @@ errno_t do_slot(CK_FUNCTION_LIST *module, size_t module_id, CK_SLOT_ID slot_id,
"Certificate verified and validated.\n");
}
- *_multi = talloc_strdup(mem_ctx, "");
- if (*_multi == NULL) {
- DEBUG(SSSDBG_CRIT_FAILURE, "Failed to create output string.\n");
- ret = ENOMEM;
- goto done;
- }
-
DLIST_FOR_EACH(item, cert_list) {
DEBUG(SSSDBG_TRACE_ALL, "Found certificate has key id [%s].\n",
item->id);
@@ -1885,6 +1886,12 @@ errno_t do_slot(CK_FUNCTION_LIST *module, size_t module_id, CK_SLOT_ID slot_id,
*_multi = talloc_asprintf_append(*_multi, "%s\n%s\n%s\n%s\n%s\n",
token_name, module_file_name, item->id,
item->label, item->cert_b64);
+ if (*_multi == NULL) {
+ DEBUG(SSSDBG_CRIT_FAILURE,
+ "Failed to append certiticate to the output string.\n");
+ ret = ENOMEM;
+ goto done;
+ }
}
ret = EOK;
@@ -1933,9 +1940,14 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
CK_INFO module_info;
CK_RV rv;
size_t module_id;
- char *multi = NULL;
P11KitUri *uri = NULL;
+ *_multi = talloc_strdup(mem_ctx, "");
+ if (*_multi == NULL) {
+ DEBUG(SSSDBG_CRIT_FAILURE, "Failed to create output string.\n");
+ return ENOMEM;
+ }
+
if (uri_str != NULL) {
uri = p11_kit_uri_new();
if (uri == NULL) {
@@ -1986,9 +1998,17 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
}
DEBUG(SSSDBG_TRACE_ALL, "common name: [%s].\n", mod_name);
- DEBUG(SSSDBG_TRACE_ALL, "dll name: [%s].\n", mod_file_name);
-
free(mod_name);
+
+ DEBUG(SSSDBG_TRACE_ALL, "dll name: [%s].\n", mod_file_name);
+ if (mode == OP_AUTH && strcmp(mod_file_name, module_name_in) != 0) {
+ DEBUG(SSSDBG_TRACE_ALL, "Module name [%s] does not match "
+ "module_name_in [%s]. "
+ "Skipping this module...\n",
+ mod_file_name, module_name_in);
+ free(mod_file_name);
+ continue;
+ }
free(mod_file_name);
rv = modules[c]->C_GetInfo(&module_info);
@@ -2104,10 +2124,13 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
}
slot_id = slots[s];
- break;
- }
- if (slot_id != (CK_SLOT_ID)-1) {
- break;
+ module_id = c;
+ ret = do_slot(module, module_id, slot_id, &info, &token_info, &module_info,
+ mem_ctx, p11_ctx, mode, pin, module_name_in, token_name_in,
+ key_id_in, label_in, uri_str, _multi);
+ if (ret != EOK) {
+ goto done;
+ }
}
}
@@ -2126,7 +2149,7 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
goto done;
}
- if (slot_id == (CK_SLOT_ID)-1) {
+ if (slot_id == (CK_SLOT_ID)-1 || (mode == OP_AUTH && *_multi[0] == '\0')) {
DEBUG(SSSDBG_TRACE_ALL, "Token not present.\n");
if (!p11_ctx->wait_for_card) {
ret = EIO;
@@ -2147,18 +2170,23 @@ errno_t do_card(TALLOC_CTX *mem_ctx, struct p11_ctx *p11_ctx,
DEBUG(SSSDBG_OP_FAILURE, "wait_for_card failed.\n");
goto done;
}
- }
- module_id = c;
- ret = do_slot(module, module_id, slot_id, &info, &token_info, &module_info,
- mem_ctx, p11_ctx, mode, pin, module_name_in, token_name_in,
- key_id_in, label_in, uri_str, &multi);
- *_multi = multi;
+ ret = do_slot(module, module_id, slot_id, &info, &token_info, &module_info,
+ mem_ctx, p11_ctx, mode, pin, module_name_in, token_name_in,
+ key_id_in, label_in, uri_str, _multi);
+ if (mode == OP_AUTH && *_multi[0] == '\0') {
+ ret = EIO;
+ }
+ }
- ret = EOK;
done:
p11_kit_modules_finalize_and_release(modules);
p11_kit_uri_free(uri);
+ if (ret != EOK) {
+ talloc_free(*_multi);
+ *_multi = NULL;
+ }
+
return ret;
}
--
2.51.0

active/os_rhel/rhel8.md Normal file
View File

@@ -0,0 +1,85 @@
# RHEL 8
## Subscriptions
```bash
# Re-up subscription
subscription-manager register
subscription-manager release --show
subscription-manager release --set=8.10
```
`dnf update` will sometimes throw an error like:
```bash
Updating Subscription Management repositories.
Red Hat Enterprise Linux 8 for x86_64 - BaseOS (RPMs) 0.0 B/s | 0 B 00:00
Errors during downloading metadata for repository 'rhel-8-for-x86_64-baseos-rpms':
- Curl error (77): Problem with the SSL CA cert (path? access rights?) for https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/repodata/repomd.xml [error setting certificate verify locations:
CAfile: %(ca_cert_dir)sredhat-uep.pem
CApath: none]
Error: Failed to download metadata for repo 'rhel-8-for-x86_64-baseos-rpms': Cannot download repomd.xml: Curl error (77): Problem with the SSL CA cert (path? access rights?) for https://cdn.redhat.com/content/dist/rhel8/8/x86_64/baseos/os/repodata/repomd.xml [error setting certificate verify locations:
CAfile: %(ca_cert_dir)sredhat-uep.pem
CApath: none]
```
It can be fixed by adding the correct details to /etc/rhsm/rhsm.conf
<https://access.redhat.com/solutions/7098119>
```ini
# /etc/rhsm/rhsm.conf
[rhsm]
# Content base URL:
baseurl = https://cdn.redhat.com
# Repository metadata GPG key URL:
repomd_gpg_url =
# Server CA certificate location:
ca_cert_dir = /etc/rhsm/ca/
# Default CA cert to use when generating yum repo configs:
repo_ca_cert = %(ca_cert_dir)sredhat-uep.pem
```
## DNF Repo Hotfixes
Fixes an issue where dnf would fail to install a package due to multiple
modules or "conflicting requests" or "filtered out by modular filtering".
<https://dnf.readthedocs.io/en/latest/modularity.html#hotfix-repositories>
```ini
# Add this to all conflicting rpm sources in /etc/yum.repos.d
module_hotfixes=1
```
## Patching an RPM
### Creating the patch
1. Create your patch by checking out the tag matching the rpm version of the software you want to patch
2. Cherry pick the commits you want to patch in: `git cherry-pick 3392a857c^..782a6dd54 --strategy-option=theirs`
3. Create a diff: `git format-patch 20ddeab85^..0cf92b3d4 --stdout > p11child.patch`
4. Copy the patch to your rhel instance
### Applying the patch
1. Enable the `codeready-builder-for-...-source-rpms` rpm source in `/etc/yum.repos.d/redhat.repo`
2. For RHEL 8: Add `module_hotfixes=1` to appstream and codebuild in `/etc/yum.repos.d/redhat.repo`
3. `dnf download --source sssd`
4. `rpm -i sssd-2.9.4-5.el8_10.2.src.rpm`
5. `cd rpmbuild/SPECS`
6. Edit `sssd.spec` and change `Release: 5%{?dist}.2` to match your release: e.g. `Release: 5%{?dist}_10.2`
7. `dnf builddep sssd.spec`
8. `rpmbuild -bb sssd.spec --nocheck`
9. `cd ~/rpmbuild/RPMS/x86_64`
10. For RHEL 8 `rpm -ivh ./sssd-2.9.4-5.el8_10.2.x86_64.rpm --force`
11. For RHEL 9 `dnf install ./sssd-2.9.4-5.el8_10.2.x86_64.rpm`
## VM Passthrough
If you get "device or resource busy" while trying to pass through a smart card
to a VM, you'll probably need to `systemctl stop pcscd` on the host.

BIN
active/os_rhel/sssd-rpm.tar Normal file

Binary file not shown.

View File

@@ -0,0 +1,18 @@
--- sssd.spec 2025-10-07 16:20:35.377452600 -0400
+++ sssd.spec.bk 2025-10-07 16:20:23.322575768 -0400
@@ -19,7 +19,7 @@
Name: sssd
Version: 2.9.4
-Release: 5%{?dist}.2
+Release: 5%{?dist}_10.2
Group: Applications/System
Summary: System Security Services Daemon
License: GPLv3+
@@ -46,6 +46,7 @@
Patch0017: 0017-KCM-another-memory-leak-fixed.patch
Patch0018: 0018-SYSDB-don-t-add-group-members-if-ignore_group_member.patch
Patch0019: 0019-SYSDB-Use-SYSDB_NAME-from-cached-entry-when-updating.patch
+Patch0020: 0020-p11child.patch
### Downstream Patches ###

View File

@@ -17,9 +17,12 @@
- [Cleaning up old snapshots](#cleaning-up-old-snapshots)
- [Creating and restoring snapshots](#creating-and-restoring-snapshots)
- [Filesystem ACLs](#filesystem-acls)
- [Decrypting Pools](#decrypting-pools)
- [ZPool Scrubbing](#zpool-scrubbing)
- [ISCSI](#iscsi)
- [Create ZVOL](#create-zvol)
- [Create ISCSI Target](#create-iscsi-target)
- [Troubleshooting](#troubleshooting)
- [VMs](#vms)
- [Converting zvol to qcow2](#converting-zvol-to-qcow2)
- [Converting qcow2 to zvol](#converting-qcow2-to-zvol)
@@ -247,6 +250,42 @@ Dataset -> Dataset details (edit) -> Advanced Options -> ACL Type (inherit)
setfacl -b -R /mnt/enc0/smb/media
```
### Decrypting Pools
To unlock through the UI, we'll need to recreate the key manifest JSON. This is a
little tedious, but your keys will be correct after this process.
```bash
# List all datasets and format them for json keys
export LIST_DATASET=pool0/dcsi
echo "{" && \
for DATASET_PATH in $(sudo zfs list -r $LIST_DATASET -H -o name); do echo " \"$DATASET_PATH\": \"key_here\","; done && \
echo "}"
# If the dataset's children have all the encryption keys
# Note this generates the cat EOF commands to create the json files needed to unlock.
export TL_DATASET=pool0
for TL_DATASET_PATH in $(zfs list -r $TL_DATASET -H -o name -d 1); do \
echo "cat <<EOF > dataset_${TL_DATASET_PATH}_key.json" && \
echo "{" && \
for DATASET_PATH in $(zfs list -r $TL_DATASET_PATH -H -o name); do echo " \"$DATASET_PATH\": \"key_here\","; done && \
echo "}" && \
echo "EOF";
done
```
### ZPool Scrubbing
```bash
# Start a scrub
zpool scrub pool0
# Check status
zpool status pool0
```
## ISCSI
### Create ZVOL
@@ -280,6 +319,13 @@ In Shared -> Block (iSCSI) Shares Targets
1. Authentication Method: `CHAP`
2. Authentication Group Number: The group number you created above
### Troubleshooting
```bash
# ISCSI connection logs
tail -f /var/log/scst.log
```
## VMs
1. Force UEFI installation

View File

@@ -4,6 +4,19 @@
## Setup
### Create the bricktracker user
```bash
# SSH into your podman server as root
useradd bricktracker
loginctl enable-linger $(id -u bricktracker)
systemctl --user --machine=bricktracker@.host enable podman-restart
systemctl --user --machine=bricktracker@.host enable --now podman.socket
mkdir -p /home/bricktracker/.config/containers/systemd
```
### Configure App
1. Copy the `.env.sample` from <https://gitea.baerentsen.space/FrederikBaerentsen/BrickTracker/src/branch/master/.env.sample> to `.env`
2. Set the following:
1. `BK_AUTHENTICATION_PASSWORD`

View File

@@ -0,0 +1,8 @@
FROM docker.io/caddy:2-builder AS builder
RUN xcaddy build \
--with github.com/caddy-dns/route53@v1.6.0
FROM docker.io/caddy:2
COPY --from=builder /usr/bin/caddy /usr/bin/caddy

View File

@@ -4,7 +4,7 @@ Description=Caddy
[Container]
AddCapability=NET_ADMIN
ContainerName=caddy
Image=docker.io/caddy:2
Image=gitea.reeseapps.com/services/caddy:latest
Network=host
SecurityLabelDisable=true
Volume=/etc/caddy:/etc/caddy

View File

@@ -1,11 +1,24 @@
# Caddy Reverse Proxy
- [Caddy Reverse Proxy](#caddy-reverse-proxy)
- [Custom Caddy Image](#custom-caddy-image)
- [Install Caddy](#install-caddy)
- [Ansible](#ansible)
- [Manual](#manual)
- [Adding a new Caddy Record](#adding-a-new-caddy-record)
## Custom Caddy Image
This repo builds a custom caddy image with route53 DNS challenge support for automatic TLS certificates.
```bash
podman image pull gitea.reeseapps.com/services/caddy:latest
```
To upgrade the image, check [the caddy-dns route53
project](https://github.com/caddy-dns/route53/tags) releases and update the
`Containerfile` with the new version.
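A build-and-push sketch, assuming you run it from the directory containing the `Containerfile`:
```bash
# podman picks up the Containerfile in the current directory by default
podman build -t gitea.reeseapps.com/services/caddy:latest .
podman push gitea.reeseapps.com/services/caddy:latest
```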
## Install Caddy
### Ansible
@@ -102,4 +115,4 @@ ddns service:
1. Update the [ddns caddy records](/active/podman_ddns/secrets/caddy_records.yaml)
2. (Optional) Update the Caddyfile at `active/podman_caddy/secrets/Caddyfile`
3. Run the [caddy ansible playbook](/active/podman_ddns/ddns.md#ansible-caddy-records)
3. Run the [caddy ansible playbook](/active/podman_caddy/caddy.md#install-caddy)

View File

@@ -4,7 +4,6 @@
- [Quickly Update DDNS Records](#quickly-update-ddns-records)
- [Install a New DDNS Service](#install-a-new-ddns-service)
- [Ansible 3D Server Records](#ansible-3d-server-records)
- [Ansible Podman Record](#ansible-podman-record)
- [Ansible Unifi External Records](#ansible-unifi-external-records)
- [Ansible Hostname reeselink records](#ansible-hostname-reeselink-records)
- [Development](#development)
@@ -86,16 +85,6 @@ active/podman_ddns/install_ddns.yaml \
-e "@active/podman_ddns/secrets/3dserver_records.yaml"
```
### Ansible Podman Record
```bash
ansible-playbook \
-i ansible/inventory.yaml \
-l podman \
active/podman_ddns/install_ddns.yaml \
-e "@active/podman_ddns/secrets/podman_records.yaml"
```
### Ansible Unifi External Records
```bash

View File

@@ -1,8 +1,6 @@
version: "3"
services:
gitea:
image: docker.gitea.com/gitea:1.24
image: docker.gitea.com/gitea:1.25-rootless
container_name: gitea
environment:
- GITEA__database__DB_TYPE=postgres
@@ -10,11 +8,15 @@ services:
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
security_opt:
- "label=disable"
restart: always
networks:
- gitea
volumes:
- /home/gitea/gitea_data:/data:Z
- /home/gitea/gitea_etc:/etc/gitea:Z
- /home/gitea/gitea_custom:/var/lib/gitea/custom:Z
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
@@ -27,6 +29,8 @@ services:
postgres:
image: docker.io/library/postgres:15
container_name: postgres
security_opt:
- "label=disable"
restart: always
environment:
- POSTGRES_USER=gitea

View File

@@ -6,8 +6,8 @@
- [Create the gitea user](#create-the-gitea-user)
- [Convert Compose to Quadlet](#convert-compose-to-quadlet)
- [Install Quadlets](#install-quadlets)
- [Upgrade Quadlets](#upgrade-quadlets)
- [Editing Configs within Container](#editing-configs-within-container)
- [Upgrade](#upgrade)
- [Editing Gitea Config](#editing-gitea-config)
- [Gitea Runners](#gitea-runners)
- [Firewall Rules](#firewall-rules)
- [Install](#install)
@@ -19,35 +19,38 @@
### A note on directories
```bash
RunMode: prod
AppPath: /usr/local/bin/gitea
WorkPath: /data/gitea
CustomPath: /data/gitea
ConfigFile: /data/gitea/conf/app.ini
2025/07/30 16:49:12 cmd/web.go:116:showWebStartupMessage() [I] * AppPath: /usr/local/bin/gitea
2025/07/30 16:49:12 cmd/web.go:117:showWebStartupMessage() [I] * WorkPath: /var/lib/gitea
2025/07/30 16:49:12 cmd/web.go:118:showWebStartupMessage() [I] * CustomPath: /var/lib/gitea/custom
2025/07/30 16:49:12 cmd/web.go:119:showWebStartupMessage() [I] * ConfigFile: /etc/gitea/app.ini
Data: /data/gitea/data/
2025/07/30 16:49:12 modules/storage/storage.go:176:initAttachments() [I] Initialising Attachment storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/attachments
2025/07/30 16:49:12 modules/storage/storage.go:166:initAvatars() [I] Initialising Avatar storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/avatars
2025/07/30 16:49:12 modules/storage/storage.go:192:initRepoAvatars() [I] Initialising Repository Avatar storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/repo-avatars
2025/07/30 16:49:12 modules/storage/storage.go:198:initRepoArchives() [I] Initialising Repository Archive storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/repo-archive
2025/07/30 16:49:12 modules/storage/storage.go:208:initPackages() [I] Initialising Packages storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/packages
2025/07/30 16:49:12 modules/storage/storage.go:219:initActions() [I] Initialising Actions storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/actions_log
2025/07/30 16:49:12 modules/storage/storage.go:223:initActions() [I] Initialising ActionsArtifacts storage with type: local
2025/07/30 16:49:12 modules/storage/local.go:33:NewLocalStorage() [I] Creating new Local Storage at /var/lib/gitea/data/actions_artifacts
```
### Create the gitea user
```bash
useradd gitea
su - gitea
ssh-keygen
exit
cp ~/.ssh/authorized_keys /home/gitea/.ssh/authorized_keys
chown gitea:gitea /home/gitea/.ssh/authorized_keys
loginctl enable-linger $(id -u gitea)
```
SSH into the server as gitea
```bash
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
mkdir -p ~/.config/containers/systemd
systemctl --user --machine=gitea@.host enable podman-restart
systemctl --user --machine=gitea@.host enable --now podman.socket
su -l gitea
mkdir -p .config/containers/systemd
mkdir data config postgres
exit
```
### Convert Compose to Quadlet
@@ -69,32 +72,60 @@ quay.io/k9withabone/podlet \
compose $(pwd)/active/podman_gitea/compose/compose.yaml
# Copy the files to the server
scp -r $(pwd)/active/podman_gitea/quadlets/. gitea:~/.config/containers/systemd/
scp -r $(pwd)/active/podman_gitea/quadlets/. 3dserver:/home/gitea/.config/containers/systemd/
```
### Install Quadlets
The first user you register will be the admin
First, set up the volumes needed by the container.
```bash
ssh gitea systemctl --user daemon-reload
ssh gitea systemctl --user restart gitea postgres
# Enter the rootless user namespace
podman unshare
# Create the volumes
mkdir gitea_data
chown -R 1000:1000 gitea_data
mkdir gitea_etc
chown -R 1000:1000 gitea_etc
exit
```
Now launch the service. The first user you register will be the admin.
```bash
# Get a shell with a working systemd user session
machinectl shell gitea@
systemctl --user daemon-reload
systemctl --user restart gitea postgres
# Enables auto-update service which will pull new container images automatically every day
ssh gitea systemctl --user enable --now podman-auto-update.timer
systemctl --user enable --now podman-auto-update.timer
```
### Upgrade Quadlets
### Upgrade
1. Check [the blog](https://blog.gitea.com/) for any breaking changes.
2. Update the `compose.yaml` with any needed changes
3. [Regenerate the quadlets](#convert-compose-to-quadlet)
4. Upload the new quadlets and restart the service
```bash
scp -r quadlets/. gitea:~/.config/containers/systemd/
ssh gitea systemctl --user daemon-reload
ssh gitea systemctl --user restart gitea postgres
# Upload quadlets and restart
export PODMAN_SERVER=3dserver
scp -r active/podman_gitea/quadlets/. $PODMAN_SERVER:/home/gitea/.config/containers/systemd/
ssh $PODMAN_SERVER chown -R gitea:gitea /home/gitea/.config/containers/systemd/
ssh $PODMAN_SERVER
machinectl shell gitea@
systemctl --user daemon-reload
systemctl --user restart gitea postgres
```
### Editing Configs within Container
### Editing Gitea Config
```bash
apk add vim
# Use podman unshare to work within the container's namespace
podman unshare vim ~/gitea_data/gitea/conf/app.ini
```
## Gitea Runners

View File

@@ -4,11 +4,15 @@ Requires=postgres.service
[Container]
AutoUpdate=registry
ContainerName=gitea
Image=docker.gitea.com/gitea:1.24
Environment=GITEA__database__DB_TYPE=postgres GITEA__database__HOST=postgres:5432 GITEA__database__NAME=gitea GITEA__database__USER=gitea GITEA__database__PASSWD=gitea
Image=docker.gitea.com/gitea:1.25-rootless
Network=gitea.network
PublishPort=3000:3000
PublishPort=2222:2222
SecurityLabelDisable=true
Volume=/home/gitea/gitea_data:/data:Z
Volume=/home/gitea/gitea_etc:/etc/gitea:Z
Volume=/home/gitea/gitea_custom:/var/lib/gitea/custom:Z
Volume=/etc/localtime:/etc/localtime:ro
[Service]

View File

@@ -4,6 +4,7 @@ ContainerName=postgres
Environment=POSTGRES_USER=gitea POSTGRES_PASSWORD=gitea POSTGRES_DB=gitea
Image=docker.io/library/postgres:15
Network=gitea.network
SecurityLabelDisable=true
Volume=/home/gitea/gitea_postgres:/var/lib/postgresql/data:Z
[Service]

View File

@@ -0,0 +1,3 @@
# Compose
Put your compose.yaml here.

View File

@@ -0,0 +1,79 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:v2.3.1
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
- /home/immich/library:/data:Z
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
networks:
- immich
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: release-cuda
image: ghcr.io/immich-app/immich-machine-learning:release
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- /home/immich/model-cache:/cache:Z
env_file:
- .env
restart: always
healthcheck:
disable: false
networks:
- immich
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
healthcheck:
test: redis-cli ping || exit 1
restart: always
networks:
- immich
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: immich
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
- /home/immich/postgres:/var/lib/postgresql/data:Z
shm_size: 128mb
restart: always
networks:
- immich
networks:
immich:
enable_ipv6: true

View File

@@ -0,0 +1,193 @@
# Podman immich
- [Podman immich](#podman-immich)
- [Setup immich Project](#setup-immich-project)
- [Install immich](#install-immich)
- [Create the immich user](#create-the-immich-user)
- [Write the immich compose spec](#write-the-immich-compose-spec)
- [A Note on Volumes](#a-note-on-volumes)
- [Convert immich compose spec to quadlets](#convert-immich-compose-spec-to-quadlets)
- [Create any container-mounted directories](#create-any-container-mounted-directories)
- [Start and enable your systemd quadlet](#start-and-enable-your-systemd-quadlet)
- [Expose immich](#expose-immich)
- [firewalld](#firewalld)
- [Backup immich](#backup-immich)
- [Upgrade immich](#upgrade-immich)
- [Upgrade Quadlets](#upgrade-quadlets)
- [Upload Images in Bulk](#upload-images-in-bulk)
- [Uninstall](#uninstall)
- [Notes](#notes)
- [SELinux](#selinux)
## Setup immich Project
- [x] Copy and rename this folder to active/podman_immich
- [x] Find and replace immich with the name of the service.
- [x] Create the rootless user to run the podman containers
- [ ] Write the compose.yaml spec for your service
- [ ] Convert the compose.yaml spec to a quadlet
- [ ] Install the quadlet on the podman server
- [ ] Expose the quadlet service
- [ ] Install a backup service and timer
## Install immich
### Create the immich user
```bash
# SSH into your podman server as root
useradd immich
loginctl enable-linger $(id -u immich)
systemctl --user --machine=immich@.host enable podman-restart
systemctl --user --machine=immich@.host enable --now podman.socket
mkdir -p /home/immich/.config/containers/systemd
```
### Write the immich compose spec
1. Pull down the immich files
```bash
# Pull the compose file
wget -O active/podman_immich/release-compose.yaml https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
# Pull the .env file
wget -O active/podman_immich/release-env https://github.com/immich-app/immich/releases/latest/download/example.env
```
2. Edit the compose.yaml. Replace all environment variables with their correct values.
3. Edit the .env file. Make sure to match exactly what is in the compose file.
#### A Note on Volumes
Named volumes are stored at `/home/immich/.local/share/containers/storage/volumes/`.
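To see them, assuming a named volume like `model-cache` from the upstream compose file:
```bash
# As the immich user: list named volumes and print where one lives on disk
podman volume ls
podman volume inspect model-cache --format '{{ .Mountpoint }}'
```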
### Convert immich compose spec to quadlets
Run the following to convert a compose.yaml into the various `.container` files for systemd:
```bash
# Generate the systemd service
podman run \
--security-opt label=disable \
--rm \
-v $(pwd)/active/podman_immich/compose:/compose \
-v $(pwd)/active/podman_immich/quadlets:/quadlets \
quay.io/k9withabone/podlet \
-f /quadlets \
-i \
--overwrite \
compose /compose/compose.yaml
# Copy the files to the server
export PODMAN_SERVER=3dserver
scp -r active/podman_immich/quadlets/. $PODMAN_SERVER:/home/immich/.config/containers/systemd/
ssh $PODMAN_SERVER chown -R immich:immich /home/immich/.config/containers/systemd/
```
### Create any container-mounted directories
SSH into your podman server as root:
```bash
machinectl shell immich@
podman unshare
mkdir library postgres model-cache
```
### Start and enable your systemd quadlet
SSH into your podman server as root:
```bash
machinectl shell immich@
systemctl --user daemon-reload
systemctl --user restart immich-server.service immich-machine-learning.service
# Enable auto-update service which will pull new container images automatically every day
systemctl --user enable --now podman-auto-update.timer
```
### Expose immich
1. If you need a domain, follow the [DDNS instructions](/active/podman_ddns/ddns.md#install-a-new-ddns-service)
2. For a web service, follow the [Caddy instructions](/active/podman_caddy/caddy.md#adding-a-new-caddy-record)
3. Finally, follow your OS's guide for opening ports via its firewall service.
#### firewalld
```bash
# command to get current active zone and default zone
firewall-cmd --get-active-zones
firewall-cmd --get-default-zone
# command to open 443 on tcp
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
# command to open 80 and 443 on tcp and udp
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
# command to list available services and then open http and https
firewall-cmd --get-services
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
```
## Backup immich
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
## Upgrade immich
### Upgrade Quadlets
Upgrades should be a repeat of [writing the compose spec](#write-the-immich-compose-spec) and [installing the quadlets](#start-and-enable-your-systemd-quadlet)
```bash
export PODMAN_SERVER=
scp -r quadlets/. $PODMAN_SERVER:/home/immich/.config/containers/systemd/
ssh immich systemctl --user daemon-reload
ssh immich systemctl --user restart immich-server immich-machine-learning
```
## Upload Images in Bulk
<https://docs.immich.app/features/command-line-interface/>
```bash
# Install the CLI
npm i -g @immich/cli
# immich login [url] [key]
immich login http://192.168.1.216:2283/api HFEJ38DNSDUEG
# Check the upload
immich upload --dry-run --recursive directory/
# Upload
immich upload --recursive directory/
```
## Uninstall
```bash
# Stop the user's services
systemctl --user disable podman-restart
podman container stop --all
systemctl --user disable --now podman.socket
systemctl --user disable --now podman-auto-update.timer
# Delete the user (this won't delete their home directory)
# userdel might spit out an error like:
# userdel: user immich is currently used by process 591255
# kill those processes and try again
userdel immich
```
## Notes
### SELinux
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
:z allows a container to share a mounted volume with all other containers.
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.
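For example, a hypothetical one-off container that relabels the immich library privately:
```bash
# :Z relabels /home/immich/library so only this container's SELinux label can use it
podman run --rm -v /home/immich/library:/data:Z docker.io/library/alpine ls /data
```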

View File

@@ -0,0 +1,26 @@
# You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=/home/immich/library
# The location where your database files are stored. Network shares are not supported for the database
DB_DATA_LOCATION=/home/immich/postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
TZ=America/New_York
# The Immich version to use. You can pin this to a specific version like "v1.71.0"
IMMICH_VERSION=release
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich
# Should match the container_name fields in the compose.yaml
REDIS_HOSTNAME=immich_redis
DB_HOSTNAME=immich_postgres

View File

@@ -0,0 +1,3 @@
# Quadlets
Put your quadlets here.

View File

@@ -0,0 +1,13 @@
[Container]
ContainerName=immich_postgres
Environment=POSTGRES_PASSWORD=postgres POSTGRES_USER=postgres POSTGRES_DB=immich POSTGRES_INITDB_ARGS=--data-checksums
Image=ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
Network=immich.network
ShmSize=128mb
Volume=/home/immich/postgres:/var/lib/postgresql/data:Z
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,12 @@
[Container]
ContainerName=immich_machine_learning
EnvironmentFile=.env
Image=ghcr.io/immich-app/immich-machine-learning:release
Network=immich.network
Volume=/home/immich/model-cache:/cache:Z
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,17 @@
[Unit]
Requires=redis.service database.service
[Container]
ContainerName=immich_server
EnvironmentFile=.env
Image=ghcr.io/immich-app/immich-server:v2.3.1
Network=immich.network
PublishPort=2283:2283
Volume=/home/immich/library:/data:Z
Volume=/etc/localtime:/etc/localtime:ro
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,5 @@
[Network]
IPv6=true
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,11 @@
[Container]
ContainerName=immich_redis
HealthCmd=redis-cli ping || exit 1
Image=docker.io/valkey/valkey:8-bookworm@sha256:fea8b3e67b15729d4bb70589eb03367bab9ad1ee89c876f54327fc7c6e618571
Network=immich.network
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -0,0 +1,74 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
# Do not edit the next line. If you want to change the media storage location on your system, edit the value of UPLOAD_LOCATION in the .env file
- ${UPLOAD_LOCATION}:/data
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:

View File

@@ -0,0 +1,22 @@
# You can find documentation for all the supported env variables at https://docs.immich.app/install/environment-variables
# The location where your uploaded files are stored
UPLOAD_LOCATION=./library
# The location where your database files are stored. Network shares are not supported for the database
DB_DATA_LOCATION=./postgres
# To set a timezone, uncomment the next line and change Etc/UTC to a TZ identifier from this list: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List
# TZ=Etc/UTC
# The Immich version to use. You can pin this to a specific version like "v2.1.0"
IMMICH_VERSION=v2
# Connection secret for postgres. You should change it to a random password
# Please use only the characters `A-Za-z0-9`, without special characters or spaces
DB_PASSWORD=postgres
# The values below this line do not need to be changed
###################################################################################
DB_USERNAME=postgres
DB_DATABASE_NAME=immich

View File

@@ -1,64 +1,59 @@
# Jellyfin
- [Jellyfin](#jellyfin)
- [Install](#install)
- [Upgrade](#upgrade)
- [Mounting Media Directory](#mounting-media-directory)
They have podman rootless instructions!
<https://jellyfin.org/docs/general/installation/container/#managing-via-systemd>
## Install
### Create jellyfin btrfs volume
1. Create the jellyfin user
```bash
useradd jellyfin
loginctl enable-linger $(id -u jellyfin)
systemctl --user --machine=jellyfin@.host enable podman-restart
systemctl --user --machine=jellyfin@.host enable --now podman.socket
mkdir -p /home/jellyfin/.config/containers/systemd
```
2. Update the jellyfin record in Caddy.
3. Open port 8096 in the firewall.
4. Copy the files to the server and start the service
```bash
export PODMAN_SERVER=3dserver
scp -r active/podman_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
ssh $PODMAN_SERVER
machinectl shell jellyfin@
systemctl --user daemon-reload
systemctl --user restart jellyfin
```
## Upgrade
1. Check [the blog](https://jellyfin.org/posts) for breaking changes
2. Update the `jellyfin.container` with the new image version
3. Update quadlets and restart the service
```bash
btrfs sub create /btrfs/jellyfin
# Upload quadlets and restart
export PODMAN_SERVER=3dserver
scp -r active/podman_jellyfin/quadlets/. $PODMAN_SERVER:/home/jellyfin/.config/containers/systemd/
ssh $PODMAN_SERVER chown -R jellyfin:jellyfin /home/jellyfin/.config/containers/systemd/
ssh $PODMAN_SERVER
machinectl shell jellyfin@
systemctl --user daemon-reload
systemctl --user restart jellyfin
```
Add /home/jellyfin mount to /etc/fstab
```bash
systemctl daemon-reload
mount -a --mkdir
```
### Create the jellyfin user
```bash
export JF_USER=jellyfin
useradd $JF_USER
su -l $JF_USER
ssh-keygen
exit
cp ~/.ssh/authorized_keys /home/$JF_USER/.ssh/authorized_keys
chown $JF_USER:$JF_USER /home/$JF_USER/.ssh/authorized_keys
loginctl enable-linger $(id -u $JF_USER)
```
SSH into the server as jellyfin
```bash
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
mkdir -p ~/.config/containers/systemd
mkdir jellyfin-config jellyfin-cache jellyfin-media
```
### Install jellyfin
~/.config/containers/systemd/jellyfin.container
```bash
# Copy the files to the server
scp -r active/podman_jellyfin/quadlets/. jellyfin:~/.config/containers/systemd/
ssh jellyfin systemctl --user daemon-reload
ssh jellyfin systemctl --user start jellyfin
ssh jellyfin journalctl --user -xeu jellyfin
ssh jellyfin systemctl --user enable --now podman-auto-update.timer
```
Update the jellyfin record in Caddy.
Open port 8096 in the firewall.
## Mounting Media Directory
Update /etc/fstab with the smb disk details.
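A sketch of an entry, assuming a CIFS share at `//nas.example.com/media` and a credentials file at `/etc/samba/creds`:
```fstab
//nas.example.com/media /var/media cifs credentials=/etc/samba/creds,uid=jellyfin,gid=jellyfin,_netdev 0 0
```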

View File

@@ -1,12 +1,12 @@
[Container]
Image=docker.io/jellyfin/jellyfin:latest
Image=docker.io/jellyfin/jellyfin:10.11.3
AutoUpdate=registry
PublishPort=8096:8096/tcp
UserNS=keep-id
SecurityLabelDisable=true
Volume=/home/jellyfin/jellyfin-config:/config:Z
Volume=/home/jellyfin/jellyfin-cache:/cache:Z
Volume=/home/jellyfin/jellyfin-media:/media:Z
Volume=/var/media:/media:Z
Network=jellyfin.network
[Service]

View File

@@ -0,0 +1,3 @@
# Compose
Put your compose.yaml here.

View File

@@ -0,0 +1,28 @@
# tuwunel
services:
matrix:
image: ghcr.io/matrix-construct/tuwunel:latest
restart: unless-stopped
ports:
- 8448:6167
volumes:
- /home/matrix/tuwunel-db:/var/lib/tuwunel
#- ./tuwunel.toml:/etc/tuwunel.toml
security_opt:
- "label=disable"
userns_mode: keep-id
environment:
TUWUNEL_SERVER_NAME: matrix.reeseapps.com # EDIT THIS
TUWUNEL_DATABASE_PATH: /var/lib/tuwunel
TUWUNEL_PORT: 6167
TUWUNEL_MAX_REQUEST_SIZE: 200000000 # in bytes, ~200 MB
TUWUNEL_ALLOW_REGISTRATION: 'false'
# TUWUNEL_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed.
#TUWUNEL_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true'
TUWUNEL_ALLOW_FEDERATION: 'true'
TUWUNEL_ALLOW_CHECK_FOR_UPDATES: 'true'
TUWUNEL_TRUSTED_SERVERS: '["matrix.org"]'
#TUWUNEL_LOG: warn,state_res=warn
TUWUNEL_ADDRESS: 0.0.0.0
#TUWUNEL_CONFIG: '/etc/tuwunel.toml' # Uncomment if you mapped config toml above

View File

@@ -0,0 +1,145 @@
# Podman matrix
- [Podman matrix](#podman-matrix)
- [Setup matrix Project](#setup-matrix-project)
- [Install matrix](#install-matrix)
- [Create the matrix user](#create-the-matrix-user)
- [Write the matrix compose spec](#write-the-matrix-compose-spec)
- [A Note on Volumes](#a-note-on-volumes)
- [Convert matrix compose spec to quadlets](#convert-matrix-compose-spec-to-quadlets)
- [Setup matrix users](#setup-matrix-users)
- [Expose matrix](#expose-matrix)
- [firewalld](#firewalld)
- [Backup matrix](#backup-matrix)
- [Upgrade matrix](#upgrade-matrix)
- [Upgrade Quadlets](#upgrade-quadlets)
- [Notes](#notes)
- [SELinux](#selinux)
## Setup matrix Project
- [x] Copy and rename this folder to active/podman_matrix
- [x] Find and replace matrix with the name of the service.
- [x] Create the rootless user to run the podman containers
- [x] Write the compose.yaml spec for your service
- [x] Convert the compose.yaml spec to a quadlet
- [x] Install the quadlet on the podman server
- [ ] Expose the quadlet service
- [ ] Install a backup service and timer
## Install matrix
### Create the matrix user
```bash
# SSH into your podman server as root
useradd matrix
loginctl enable-linger $(id -u matrix)
systemctl --user --machine=matrix@.host enable podman-restart
systemctl --user --machine=matrix@.host enable --now podman.socket
mkdir -p /home/matrix/.config/containers/systemd
```
### Write the matrix compose spec
Edit the compose.yaml at active/matrix/compose/compose.yaml
#### A Note on Volumes
Named volumes are stored at `~/.local/share/containers/storage/volumes/`.
### Convert matrix compose spec to quadlets
On your local machine:
```bash
# Generate the systemd service
podman run \
--security-opt label=disable \
--rm \
-v $(pwd)/active/podman_matrix/compose:/compose \
-v $(pwd)/active/podman_matrix/quadlets:/quadlets \
quay.io/k9withabone/podlet \
-f /quadlets \
-i \
--overwrite \
compose /compose/compose.yaml
# Copy the files to the server
scp -r active/podman_matrix/quadlets/. matrix:~/.config/containers/systemd/
# Copy the compose files to the server
scp -r active/podman_matrix/compose/. matrix:~/.config/
```
```bash
ssh matrix systemctl --user daemon-reload
ssh matrix systemctl --user restart matrix
# Enables auto-update service which will pull new container images automatically every day
ssh matrix systemctl --user enable --now podman-auto-update.timer
```
### Setup matrix users
```bash
podman run \
-v /home/matrix/tuwunel-db:/var/lib/tuwunel:Z \
-e TUWUNEL_SERVER_NAME=matrix.reeseapps.com \
-e TUWUNEL_DATABASE_PATH=/var/lib/tuwunel \
--userns=keep-id \
-it \
--rm \
ghcr.io/matrix-construct/tuwunel:latest \
--execute "users create_user ducoterra"
```
### Expose matrix
1. If you need a domain, follow the [DDNS instructions](/active/podman_ddns/ddns.md#install-a-new-ddns-service)
2. For a web service, follow the [Caddy instructions](/active/podman_caddy/caddy.md#adding-a-new-caddy-record)
3. Finally, follow your OS's guide for opening ports via its firewall service.
#### firewalld
```bash
# command to get current active zone and default zone
firewall-cmd --get-active-zones
firewall-cmd --get-default-zone
# command to open 443 on tcp
firewall-cmd --permanent --zone=<zone> --add-port=443/tcp
# command to open 80 and 443 on tcp and udp
firewall-cmd --permanent --zone=<zone> --add-port={80,443}/{tcp,udp}
# command to list available services and then open http and https
firewall-cmd --get-services
firewall-cmd --permanent --zone=<zone> --add-service={http,https}
```
## Backup matrix
Follow the [Borg Backup instructions](/active/systemd_borg/borg.md#set-up-a-client-for-backup)
## Upgrade matrix
### Upgrade Quadlets
Upgrades should be a repeat of [writing the compose spec](#write-the-matrix-compose-spec) and [converting the quadlets](#convert-matrix-compose-spec-to-quadlets)
```bash
scp -r quadlets/. matrix:~/.config/containers/systemd/
ssh matrix systemctl --user daemon-reload
ssh matrix systemctl --user restart matrix
```
## Notes
### SELinux
<https://blog.christophersmart.com/2021/01/31/podman-volumes-and-selinux/>
:z allows a container to share a mounted volume with all other containers.
:Z allows a container to reserve a mounted volume and prevents any other container from accessing it.

View File

@@ -0,0 +1,3 @@
# Quadlets
Put your quadlets here.

View File

@@ -0,0 +1,13 @@
[Container]
Environment=TUWUNEL_SERVER_NAME=matrix.reeseapps.com TUWUNEL_DATABASE_PATH=/var/lib/tuwunel TUWUNEL_PORT=6167 TUWUNEL_MAX_REQUEST_SIZE=200000000 TUWUNEL_ALLOW_REGISTRATION=false TUWUNEL_ALLOW_FEDERATION=true TUWUNEL_ALLOW_CHECK_FOR_UPDATES=true TUWUNEL_TRUSTED_SERVERS=["matrix.org"] TUWUNEL_ADDRESS=0.0.0.0
Image=ghcr.io/matrix-construct/tuwunel:latest
PublishPort=8448:6167
SecurityLabelDisable=true
UserNS=keep-id
Volume=/home/matrix/tuwunel-db:/var/lib/tuwunel
[Service]
Restart=always
[Install]
WantedBy=default.target

View File

@@ -16,31 +16,12 @@ Find and replace minecraft with the name of the service.
### Create the minecraft user
```bash
# As root
# SSH into your podman server as root
useradd minecraft
su - minecraft
# Set the minecraft user password
passwd
# Generate an SSH key for this user. We don't need it now, but it might be handy later.
ssh-keygen
# Exit back to root.
exit
# Optional, copy your authorized_keys into the minecraft user's home dir so you can SSH in.
cp ~/.ssh/authorized_keys /home/minecraft/.ssh/authorized_keys
chown minecraft:minecraft /home/minecraft/.ssh/authorized_keys
# Allow user systemd services to run after you've died (logged out).
loginctl enable-linger $(id -u minecraft)
```
SSH into the server as minecraft
```bash
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
mkdir -p ~/.config/containers/systemd
systemctl --user --machine=minecraft@.host enable podman-restart
systemctl --user --machine=minecraft@.host enable --now podman.socket
mkdir -p /home/minecraft/.config/containers/systemd
```
### Convert Compose to Quadlet
@@ -100,10 +81,32 @@ ssh minecraft systemctl --user restart minecraft
1. Create your minecraft ddns record first [following these docs](/active/podman_ddns/ddns.md#)
2. Create a SRV record in your DNS provider like the following:
| Field | Value |
| ----------- | -------------------------------------- |
| Record name | _minecraft._tcp.testcraft.reeseapps.com |
| Value | 0 5 25566 minecraft.reeseapps.com |
active/podman_minecraft/secrets/reeseapps_records.json:
```json
{
"Comment": "CREATE/UPSERT/DELETE a record ",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "_minecraft._tcp.testcraft.reeseapps.com",
"Type": "SRV",
"TTL": 300,
"ResourceRecords": [
{
"Value": "0 5 25566 minecraft.reeseapps.com"
}
]
}
}
]
}
```
```bash
aws route53 change-resource-record-sets --hosted-zone-id $(cat active/aws_route53/secrets/reeseapps-zoneid) --change-batch file://active/podman_minecraft/secrets/reeseapps_records.json
```
3. Test your record with `nslookup`
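For example:
```bash
# Query the SRV record created above
nslookup -type=SRV _minecraft._tcp.testcraft.reeseapps.com
```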

View File

@@ -9,13 +9,13 @@ ContainerName=nextcloud-aio-mastercontainer
Image=docker.io/nextcloud/all-in-one:latest
PublishPort=0.0.0.0:11001:8080
Volume=nextcloud_aio_mastercontainer:/mnt/docker-aio-config
Volume=/run/user/1001/podman/podman.sock:/var/run/docker.sock:Z
Volume=/run/user/1002/podman/podman.sock:/var/run/docker.sock:Z
Network=bridge
SecurityLabelDisable=true
Environment=APACHE_PORT=11000
Environment=APACHE_IP_BINDING=0.0.0.0
Environment=WATCHTOWER_DOCKER_SOCKET_PATH=/run/user/1001/podman/podman.sock
Environment=WATCHTOWER_DOCKER_SOCKET_PATH=/run/user/1002/podman/podman.sock
Environment=NEXTCLOUD_DATADIR="/home/nextcloud/nextcloud_data"
Environment=SKIP_DOMAIN_VALIDATION=true

View File

@@ -3,13 +3,13 @@
- [Nextcloud AIO](#nextcloud-aio)
- [Install with Rootless Podman](#install-with-rootless-podman)
- [Create the nextcloud user](#create-the-nextcloud-user)
- [Install Podman](#install-podman)
- [Create the container autostart service](#create-the-container-autostart-service)
- [Install Nextcloud](#install-nextcloud)
- [Install Caddy](#install-caddy)
- [Firewall](#firewall)
- [Backups](#backups)
- [Maintenace Mode](#maintenace-mode)
- [Manual Backups](#manual-backups)
- [Maintenance Mode](#maintenance-mode)
- [Trusted Proxy](#trusted-proxy)
- [Default phone region](#default-phone-region)
- [Adding existing files](#adding-existing-files)
@@ -17,10 +17,13 @@
- [Changing the domain](#changing-the-domain)
- [Uninstall](#uninstall)
- [Edit QCOW](#edit-qcow)
- [Stuck in login screen](#stuck-in-login-screen)
- [Freezing after working for a bit](#freezing-after-working-for-a-bit)
- [Out of disk space](#out-of-disk-space)
- [Redis can't dump its DB](#redis-cant-dump-its-db)
- [Exclude Lists](#exclude-lists)
- [Troubleshooting](#troubleshooting)
- [Stuck in login screen](#stuck-in-login-screen)
- [Freezing after working for a bit](#freezing-after-working-for-a-bit)
- [Out of disk space](#out-of-disk-space)
- [Redis can't dump its DB](#redis-cant-dump-its-db)
- [Error connecting to server](#error-connecting-to-server)
<https://github.com/nextcloud/all-in-one>
@@ -34,32 +37,22 @@ This has been tested working on Fedora 41 with selinux and firewalld enabled.
```bash
useradd nextcloud
su - nextcloud
ssh-keygen
exit
cp ~/.ssh/authorized_keys /home/nextcloud/.ssh/authorized_keys
chown nextcloud:nextcloud /home/nextcloud/.ssh/authorized_keys
loginctl enable-linger $(id -u nextcloud)
```
### Install Podman
```bash
# As root user
dnf install podman
# Now SSH into the server as the nextcloud user
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
systemctl --user --machine=nextcloud@.host enable podman-restart
systemctl --user --machine=nextcloud@.host enable --now podman.socket
su -l nextcloud
mkdir -p /home/nextcloud/.config/containers/systemd
exit
```
### Create the container autostart service
Edit the autostart service to include "unless-stopped" containers.
As the nextcloud user:
`systemctl --user edit podman-restart.service`
```bash
machinectl shell nextcloud@
systemctl --user edit podman-restart.service
```
```conf
[Service]
@@ -74,21 +67,35 @@ systemctl --user daemon-reload
### Install Nextcloud
On the operator
1. Edit `nextcloud-aio-mastercontainer.container` to include the correct username and UID where relevant.
2. Copy the files to the server:
```bash
# Make the container systemd directory (if needed)
ssh nextcloud mkdir -p ~/.config/containers/systemd
# Create the nextcloud network with ipv6
ssh nextcloud podman network create --ipv6 nextcloud-aio
# Copy the quadlet files
scp \
active/podman_nextcloud/nextcloud-aio-mastercontainer.container \
nextcloud:.config/containers/systemd/
3dserver:/home/nextcloud/.config/containers/systemd/
ssh 3dserver chown -R nextcloud:nextcloud /home/nextcloud/.config/containers/systemd/
```
On the server
```bash
machinectl shell nextcloud@
# Create volumes
mkdir nextcloud_data
mkdir nextcloud_aio_mastercontainer
# Create the nextcloud network with ipv6
podman network create --ipv6 nextcloud-aio
# Reload and restart the service
ssh nextcloud systemctl --user daemon-reload
ssh nextcloud systemctl --user restart nextcloud-aio-mastercontainer
systemctl --user daemon-reload
systemctl --user restart nextcloud-aio-mastercontainer
```
### Install Caddy
@@ -168,7 +175,15 @@ If you need to reset the borg backup repo:
docker exec nextcloud-aio-borgbackup rm /mnt/docker-aio-config/data/borg.config
```
## Maintenace Mode
### Manual Backups
1. Backup `nextcloud_data`
2. Backup all nextcloud volumes at `/home/nextcloud/.local/share/containers/storage/volumes/`
3. Backup `.config/containers/systemd/`
Copy these back to their original locations to restore.
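A minimal sketch of the copy, assuming the containers are stopped and a backup target is mounted at `/mnt/backup`:
```bash
# Run as root on the podman server; sources are the three paths above
rsync -a /home/nextcloud/nextcloud_data/ /mnt/backup/nextcloud_data/
rsync -a /home/nextcloud/.local/share/containers/storage/volumes/ /mnt/backup/volumes/
rsync -a /home/nextcloud/.config/containers/systemd/ /mnt/backup/systemd/
```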
## Maintenance Mode
```bash
docker stop nextcloud-aio-apache
@@ -230,7 +245,127 @@ sudo qemu-nbd -c /dev/nbd0 --read-only /path/to/image.qcow2
udisksctl mount -b /dev/nbd0p1
```
## Stuck in login screen
## Exclude Lists
Exclude lists take effect only if the folders/files haven't been synced yet. Here's a basic one
I stole that works well for development resources.
Put this in `~/.config/Nextcloud/sync-exclude.list`
```text
]*~
]~$*
].~lock.*
]~*.tmp
]*.~*
]Icon\r*
].DS_Store
].ds_store
]*.textClipping
]._*
]Thumbs.db
]photothumb.db
]System Volume Information
].*.sw?
].*.*sw?
].TemporaryItems
].Trashes
].DocumentRevisions-V100
].Trash-*
].fseventd
].apdisk
].Spotlight-V100
].directory
]*.part
]*.filepart
]*.crdownload
]*.kate-swp
]*.gnucash.tmp-*
].synkron.*
].sync.ffs_db
].symform
].symform-store
].fuse_hidden*
]*.unison
].nfs*
]My Saved Places.
]*.sb-*
]*.dll
]*.exe
].git/
].lock
]*.bin
].bin
]bin/
]*.lock
]node_modules/
].cache/
].vscode/
].pytest_cache/
].github/
].ipynb_checkpoints/
]*.exe
]*.dll
]*.class
]*.com
]*.so
]*.o
]@*/
]__pycache__/
].Python/
]build/
]dist/
]eggs/
].eggs/
]wheels/
]sdist/
]var/
]*.egg/
]*.egg-info/
]lib64/
]lib/
].tox/
].nox/
]env/
]venv/
]ENV/
]env.bak/
]venv.bak/
]site/
]cython_debug/
]vendor/
]tmp/
].libs/
].debs/
]src/
]Debug/
]debug/
]*.pdb
]*.enc
].enc
].sass-cache/
]_site/
].info
]*.info
].jekyll-cache
].jekyll-cache/
].zotero-ft-cache
].zotero-ft-info
]*.idlk
]zotero.sqlite.bak
]*.dwl
]*.dwl2
]*.bkp
]*.dtmp
].$*
]*.tmp
]_build/
].venv/
```
## Troubleshooting
### Stuck in login screen
Check logs at `/var/www/html/data/nextcloud.log` in `nextcloud-aio-nextcloud` container.
@@ -247,9 +382,9 @@ Sometimes this is caused by a broken app or twofactor. try:
./occ app:disable integration_openai
```
## Freezing after working for a bit
### Freezing after working for a bit
### Out of disk space
#### Out of disk space
This can happen when nextcloud tries to write logs to its volume and doesn't have enough space
@@ -258,7 +393,7 @@ podman exec -it nextcloud-aio-nextcloud bash
df -h .
```
### Redis can't dump its DB
#### Redis can't dump its DB
This can happen when the redis volume doesn't have the correct permissions
@@ -267,3 +402,9 @@ podman exec -it --user root nextcloud-aio-redis bash
ls -lah /data
chown redis:redis /data
```
#### Error connecting to server
Your nextcloud instance can't use host loopback with rootless containers. If you have a local DNS record pointing at your server's IP address, you'll need to delete it until this is fixed.

View File

@@ -1,6 +1,7 @@
# Ollama
- [Ollama](#ollama)
- [Firewall for Ollama](#firewall-for-ollama)
- [Install and run Ollama](#install-and-run-ollama)
- [Install and run Ollama with Podman](#install-and-run-ollama-with-podman)
- [Unsticking models stuck in "Stopping"](#unsticking-models-stuck-in-stopping)
@@ -15,6 +16,19 @@
<https://github.com/ollama/ollama>
## Firewall for Ollama
```bash
# Add an ollama zone if you don't have one
sudo firewall-cmd --get-active-zones
sudo firewall-cmd --new-zone=ollama --permanent
sudo firewall-cmd --reload
# Set source address to allow connections
sudo firewall-cmd --zone=ollama --add-source=10.2.0.1/24 --permanent
sudo firewall-cmd --zone=ollama --add-port=11434/tcp --permanent
sudo firewall-cmd --reload
```
## Install and run Ollama
<https://ollama.com/download/linux>

View File

@@ -2,11 +2,13 @@
- [Borg Backup](#borg-backup)
- [Install Borg](#install-borg)
- [Set up a new root client](#set-up-a-new-root-client)
- [Set up a laptop or workstation client](#set-up-a-laptop-or-workstation-client)
- [Set up a new server client](#set-up-a-new-server-client)
- [Create a Backup Service](#create-a-backup-service)
- [Check backup service logs](#check-backup-service-logs)
- [Run a Manual Backup](#run-a-manual-backup)
- [Back up an Entire System](#back-up-an-entire-system)
- [Upgrade a Borg Repo](#upgrade-a-borg-repo)
## Install Borg
@@ -29,7 +31,81 @@ touch /home/backup/.ssh/authorized_keys
chown -R backup:backup /home/backup/.ssh
```
## Set up a new root client
## Set up a laptop or workstation client
For backing up your laptop or personal account.
1. On your personal account, set up the borg connection
```bash
export BACKUP_HOST="borg.reeselink.com"
ssh-keygen -C ${USER}@${HOSTNAME} -f ~/.ssh/id_${BACKUP_HOST}
cat <<EOF >> ~/.ssh/config
Host ${BACKUP_HOST}
Hostname ${BACKUP_HOST}
IdentityFile ~/.ssh/id_${BACKUP_HOST}
User backup
Port 22
EOF
echo "export CLIENT_FQDN=${USER}.${HOSTNAME}.reeselink.com"
echo "export SSH_PUBKEY=\"$(cat ~/.ssh/id_${BACKUP_HOST}.pub)\""
```
2. On the borg backup server as the backup user:
```bash
# Use echo from above
export CLIENT_FQDN=
export SSH_PUBKEY=
# Create the authkey entry to restrict the user's access to the borg repo folder
export BORG_COMMAND="cd /home/backup/repos/${CLIENT_FQDN}; borg serve --restrict-to-path /home/backup/repos/${CLIENT_FQDN}"
export AUTHKEY_ENTRY="command=\"${BORG_COMMAND}\",restrict ${SSH_PUBKEY}"
echo $AUTHKEY_ENTRY >> /home/backup/.ssh/authorized_keys
# Create the directory
mkdir repos/${CLIENT_FQDN}
```
3. On your personal account, create the repo and your first backup
```bash
# Do not include the first / in the path
export PATH_TO_BACKUP=home/${USER}
export BACKUP_HOST="borg.reeselink.com"
export BORG_REPO=${BACKUP_HOST}:home
# If not initialized, do that now
borg init --encryption none $BORG_REPO
borg list
# Run backup and timestamp it
borg create \
--verbose \
--filter AME \
--list \
--stats \
--progress \
--show-rc \
--compression lz4 \
--exclude-caches \
${BORG_REPO}::$(date +"%F-%H-%M-%S") \
/${PATH_TO_BACKUP}
# Mount a borg archive
borg mount $BORG_REPO::2025-05-14-00-44-05 /mnt/
# Restore a borg archive to a location (dry run)
# First, cd to the location you want to extract to
cd ~
# Then, extract to that location. --strip-components takes the first n items off a path
borg extract --dry-run --list --strip-components 2 $BORG_REPO::my-files home/USERNAME
```
## Set up a new server client
Backups will be run as the root user. Generate them an SSH key to
@@ -57,6 +133,8 @@ export SSH_PUBKEY="ssh-rsa abcd1234 backup@fqdn.something.com"
export BORG_COMMAND="cd /home/backup/repos/${CLIENT_FQDN}; borg serve --restrict-to-path /home/backup/repos/${CLIENT_FQDN}"
export AUTHKEY_ENTRY="command=\"${BORG_COMMAND}\",restrict ${SSH_PUBKEY}"
echo $AUTHKEY_ENTRY >> /home/backup/.ssh/authorized_keys
mkdir /home/backup/repos/${CLIENT_FQDN}
chown -R backup:backup /home/backup/repos/${CLIENT_FQDN}
```
## Create a Backup Service
@@ -69,15 +147,14 @@ borg_user: backup
borg_host: borg.reeselink.com
borg_passphrase: ""
backup_dirs:
- /home
- /home/foobar
exclude_dirs: []
keep_daily: 7
keep_weekly: 4
keep_monthly: 1
stop_services: []
stop_user_services:
- gitea
- postgres
- foobar
```
```bash
@@ -85,7 +162,7 @@ stop_user_services:
for var_file in $(ls active/systemd_borg/secrets); do
ansible-playbook \
-i ansible/inventory.yaml \
-l podman \
-l 3dserver \
active/systemd_borg/install_backup.yaml \
-e "@active/systemd_borg/secrets/$var_file"
done
@@ -94,7 +171,8 @@ done
## Check backup service logs
```bash
ssh podman journalctl -u 'backup-*' -f
export SERVER_SSH_NAME=
ssh $SERVER_SSH_NAME journalctl -u 'backup-*' -f
```
## Run a Manual Backup
@@ -171,3 +249,7 @@ borg create \
${BORG_REPO}::$(date +"%F-%H-%M-%S") \
/
```
## Upgrade a Borg Repo
<https://borgbackup.readthedocs.io/en/stable/usage/upgrade.html>

View File

@@ -0,0 +1,181 @@
# Clamav
- [Clamav](#clamav)
- [Installation](#installation)
- [Notifications](#notifications)
- [Selinux](#selinux)
- [On Access Scanning](#on-access-scanning)
- [Testing](#testing)
- [Ignore Signatures](#ignore-signatures)
<https://wiki.archlinux.org/title/ClamAV>
## Installation
<https://docs.clamav.net/manual/Usage/Configuration.html#first-time-set-up>
```bash
# Install
sudo dnf install clamav clamav-freshclam clamd
##### Set up Freshclam #####
# Create freshclam's log file
sudo touch /var/log/freshclam.log
sudo chmod 600 /var/log/freshclam.log
sudo chown clamscan /var/log/freshclam.log
# Copy configuration files
sudo cp active/software_clamav/freshclam.conf /etc/freshclam.conf
sudo chown root:root /etc/freshclam.conf
sudo chmod u=rw,go=r /etc/freshclam.conf
# Update the freshclam DB
sudo freshclam
sudo systemctl enable clamav-freshclam --now
##### Set up Clamd #####
# Create clamd's log file
sudo touch /var/log/clamd.scan
sudo chmod 600 /var/log/clamd.scan
sudo chown clamscan /var/log/clamd.scan
# Copy configuration files
# NOTE: Edit scan.conf OnAccessIncludePath to point to your home dir
vim active/software_clamav/scan.conf
sudo cp active/software_clamav/scan.conf /etc/clamd.d/scan.conf
sudo chown root:root /etc/clamd.d/scan.conf
sudo chmod u=rw,go=r /etc/clamd.d/scan.conf
# Allow clamav with selinux
sudo setsebool -P antivirus_can_scan_system 1
```
Edit the `clamd@` service to limit system resources.
```bash
sudo -E systemctl edit clamd@
# Paste the following into the override file:
[Service]
Nice=18
IOSchedulingClass=idle
CPUSchedulingPolicy=idle
```
Then start the clamd service
```bash
sudo systemctl daemon-reload
sudo systemctl enable --now clamd@scan
sudo systemctl status clamd@scan
```
Scan something
```bash
sudo clamdscan -c /etc/clamd.d/scan.conf --multiscan --fdpass ~/Downloads
```
Allow your user to run scans
```bash
sudo -E usermod -aG virusgroup $USER
```
## Notifications
Create a new file called `/etc/clamav/virus-event.bash` and add the following
```bash
#!/bin/bash
PATH=/usr/bin
ALERT="Signature detected by clamav: $CLAM_VIRUSEVENT_VIRUSNAME in $CLAM_VIRUSEVENT_FILENAME"
# Send an alert to all graphical users.
for ADDRESS in /run/user/*; do
# Skip root, they likely won't have a desktop session anyway
if [ ${ADDRESS} != "/run/user/0" ]; then
USERID=${ADDRESS#/run/user/}
/usr/bin/sudo -u "#$USERID" DBUS_SESSION_BUS_ADDRESS="unix:path=$ADDRESS/bus" PATH=${PATH} \
/usr/bin/notify-send -u critical -i dialog-warning "ClamAV Alert!" "$ALERT"
fi
done
```
Then ensure you have `VirusEvent /etc/clamav/virus-event.bash` in your
`scan.conf`.
Allow clamav to run notify-send in `/etc/sudoers.d/clamav` by adding `clamav
ALL = (ALL) NOPASSWD: SETENV: /usr/bin/notify-send`.
### Selinux
Troubleshooting notification permission denied errors is tricky, but it basically involves:
1. Disable selinux hidden denies: `sudo semodule -DB`
2. Clear the selinux audit logs: `sudo rm /var/log/audit/audit.log*`
3. Set enforce to permissive: `sudo setenforce 0`
4. Try to access eicar.com with clamonacc enabled
5. Capture the audit logs with `sudo ausearch --raw | audit2allow -m clamav-rules`
6. Set enforce to enforcing: `sudo setenforce 1`
7. Re-enable selinux hidden denies (if you want): `sudo semodule -B`
8. `sudo setsebool daemons_enable_cluster_mode on`
9. `sudo semodule -X 300 -i active/os_fedora/selinux_policies/clamav-notifysend.pp`
10. `sudo semodule -X 300 -i active/os_fedora/selinux_policies/clamav-sudo.pp`
11. `sudo semodule -X 300 -i active/os_fedora/selinux_policies/clamav-unixchkpwd.pp`
## On Access Scanning
If you want to destroy your computer you can enable on-access scanning.
My recommendation is to only enable on-access scanning for critical ingress
paths, like `~/Downloads` or `~/tmp`. This will help keep system resources free
while also scanning critical points on your system.
```bash
sudo -E systemctl edit clamav-clamonacc.service
# Paste the following into the override file:
[Service]
ExecStart=
ExecStart=/usr/sbin/clamonacc -F --fdpass --config-file=/etc/clamd.d/scan.conf
# Then reload and start the service
sudo systemctl daemon-reload
sudo systemctl enable --now clamav-clamonacc.service
```
## Testing
The `eicar` test malware allows you to test any malware scanner, as every
scanner should have its signature included in its database.
1. Create a new file called `eicar.com`
2. Add the contents: `X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*`
3. Save and scan: `clamdscan --fdpass --multiscan eicar.com`
If you have on access scanning enabled you can try the following
```bash
cd ~/Downloads/
wget https://secure.eicar.org/eicar.com.txt
# This should not work
cat eicar.com.txt
```
## Ignore Signatures
<https://docs.clamav.net/faq/faq-ignore.html>
```bash
# Create the ignore list
cd /var/lib/clamav
touch ignore_list.ign2
```
Then add an ignore, like `PUA.Win.Trojan.Xored-1` which is a [known false
positive](https://github.com/jensyt/imurmurhash-js/issues/1).
Then `systemctl restart clamd@scan`.
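For example, adding the ignore and restarting:
```bash
# Append the false-positive signature name, then restart clamd
echo "PUA.Win.Trojan.Xored-1" | sudo tee -a /var/lib/clamav/ignore_list.ign2
sudo systemctl restart clamd@scan
```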

View File

@@ -0,0 +1,8 @@
LogFileMaxSize 100M
LogTime yes
DatabaseDirectory /var/lib/clamav
DatabaseOwner clamupdate
DatabaseMirror database.clamav.net
Bytecode yes

View File

@@ -0,0 +1,62 @@
# ClamAV will refuse to scan files above 2G regardless of what this is set to
MaxFileSize 2G
# MaxScanSize controls how much of an archive is unpacked
MaxScanSize 64G
LogFileMaxSize 50M
LogTime yes
LogSyslog yes
ExtendedDetectionInfo yes
LocalSocket /var/run/clamd.scan/clamd.socket
LocalSocketGroup virusgroup
LocalSocketMode 660
FixStaleSocket yes
MaxThreads 8
MaxDirectoryRecursion 20
User clamscan
Bytecode yes
HeuristicAlerts yes
DetectPUA yes
ScanPE yes
ScanELF yes
ScanMail yes
ScanHTML yes
ScanOLE2 yes
AlertBrokenExecutables no
AlertBrokenMedia no
AlertEncrypted no
AlertEncryptedArchive no
AlertEncryptedDoc no
AlertOLE2Macros no
AlertPartitionIntersection no
AlertExceedsMax no
ScanPDF yes
ScanSWF yes
ScanXMLDOCS yes
ScanHWP3 yes
ScanArchive yes
# These are just examples, add what you think should be protected.
OnAccessIncludePath /home/ducoterra/Downloads
OnAccessIncludePath /home/ducoterra/Projects
OnAccessIncludePath /home/ducoterra/Applications
OnAccessIncludePath /home/ducoterra/AUR
# Prevention doesn't work with OnAccessMountPath.
# It works with OnAccessIncludePath, as long as /usr and /etc are not included.
# Including /var while activating prevention is also not recommended, because
# this would slow down package installation by a factor of 1000.
OnAccessPrevention yes
OnAccessExcludeUname clamupdate
OnAccessExcludeUname clamscan
OnAccessExtraScanning yes
VirusEvent /etc/clamav/virus-event.bash

View File

@@ -1,5 +1,47 @@
# FFMPEG
## Slow down a gif
```bash
ffmpeg \
-r 100 \
-i "solid-snake-mgs.gif" \
-vf " minterpolate=50,split[g][p]; [p]palettegen[p]; [g][p]paletteuse" out.gif
```
## Combine images into a gif
```bash
ffmpeg \
-f image2 \
-framerate 3 \
-i frame_%d.jpeg video.gif
```
## Compression
```bash
ffmpeg \
-hwaccel vaapi \
-vaapi_device /dev/dri/renderD128 \
-hwaccel_output_format vaapi \
-i input.mp4 \
-c:v hevc_vaapi \
-rc_mode 1 \
-qp 25 \
output.mp4
ffmpeg \
-hwaccel vaapi \
-vaapi_device /dev/dri/renderD128 \
-hwaccel_output_format vaapi \
-i input.mp4 \
-c:v h264_vaapi \
-b:v 0 \
-maxrate 10M \
output.mp4
```
## DVD Ripping
```bash
@@ -21,4 +63,4 @@ ffmpeg \
-c:a aac \
-b:a 256k \
~/Downloads/VTS_01_1.mp4
```
```

View File

@@ -0,0 +1,33 @@
# Firewalld
## Notes
```bash
# Add a port
firewall-cmd --permanent --add-port=22/tcp
# List active zones
firewall-cmd --get-active-zones
# Set default zone
firewall-cmd --set-default-zone=drop
# Set zone for a specific subnet
firewall-cmd --permanent --zone=drop --add-source=10.244.0.0/16
# Get info about service
firewall-cmd --info-service=samba
# Get zone information
firewall-cmd --info-zone=drop
```
## Inspecting Zones
```bash
# List all active rules
firewall-cmd --list-all
# Log all denies
firewall-cmd --set-log-denied=all
```
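With `--set-log-denied=all` the rejected packets show up in the kernel log:
```bash
# Watch denies live (firewalld prefixes them, e.g. "FINAL_REJECT")
journalctl -k -f
```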

View File

@@ -4,8 +4,10 @@
- [Notes](#notes)
- [Quickstart Debugging Setup](#quickstart-debugging-setup)
- [Quickstart Production Setup](#quickstart-production-setup)
- [NFS](#nfs)
- [Tips](#tips)
- [Adding a user](#adding-a-user)
- [Adding a Smart Card Certificate](#adding-a-smart-card-certificate)
- [PIV](#piv)
- [Sources](#sources)
- [Set up PIV Auth on the Host where you Installed FreeIPA](#set-up-piv-auth-on-the-host-where-you-installed-freeipa)
@@ -17,6 +19,8 @@
- [Finding devices in sysfs WIP](#finding-devices-in-sysfs-wip)
- [Finding p11 devices WIP](#finding-p11-devices-wip)
- [Arch Client WIP](#arch-client-wip)
- [Troubleshooting](#troubleshooting)
- [Changing IP address](#changing-ip-address)
An AD Server.
@@ -71,6 +75,10 @@ dnf install ipa-server-dns bind-dyndb-ldap -y
# 2. We don't need to scan for additional zones
ipa-server-install --setup-dns
# Setup firewall
firewall-cmd --add-service=freeipa-ldap --add-service=freeipa-ldaps --add-service=dns --permanent
firewall-cmd --reload
# Install flatpak
dnf install flatpak
flatpak remote-add --if-not-exists flathub https://dl.flathub.org/repo/flathub.flatpakrepo
@@ -93,41 +101,99 @@ Now skip to [Get PIV Working](#piv)
<https://www.freeipa.org/page/Quick_Start_Guide>
- Set your hostname to your server's fqdn with `hostnamectl hostname freeipa.reeselink.com`
- Ensure you have a DNS entry pointing to your host
- Open ports:
1. Set your hostname to your server's fqdn with `hostnamectl hostname freeipa.reeselink.com`
2. If you want freeipa to manage your DNS, make sure you don't have a DNS address pointing to your domain
3. Open freeipa ports
```bash
firewall-cmd --add-service=freeipa-ldap --add-service=freeipa-ldaps
firewall-cmd --add-service=freeipa-ldap --add-service=freeipa-ldaps --permanent
```
```bash
firewall-cmd --add-service=freeipa-4
firewall-cmd --add-service=freeipa-4 --permanent
firewall-cmd --reload
```
- Set a permanet DNS resolver: `sudo echo "nameserver 1.1.1.1" > /etc/resolv.conf`
- Disable NetworkManager DNS management
4. Set a permanent DNS resolver
```bash
vim /etc/NetworkManager/NetworkManager.conf
```bash
rm /etc/resolv.conf
echo "nameserver 1.1.1.1" > /etc/resolv.conf
```
[main]
dns=none
```
5. Disable NetworkManager DNS management
Note, if installing for local use only, set /etc/hosts and reply "yes" to configure dns.
```bash
vim /etc/NetworkManager/NetworkManager.conf
```bash
vim /etc/hosts
[main]
dns=none
```
192.168.122.100 freeipa.reeselink.com
```
- Restart NetworkManager: `systemctl restart NetworkManager`
- Ensure resolv.conf hasn't been repopulated: `cat /etc/resolv.conf`
- Install freeipa: `dnf install -y freeipa-server freeipa-server-dns`
- Install the server (mostly choose defaults and sane options): `ipa-server-install`
- Authenticate as admin: `kinit admin`
6. Reboot
7. Ensure resolv.conf hasn't been repopulated: `cat /etc/resolv.conf`
8. Install freeipa: `dnf install -y freeipa-server freeipa-server-dns`
9. Install the server (mostly choose defaults and sane options): `ipa-server-install`
10. Authenticate as admin: `kinit admin`
Now skip to [Get PIV Working](#piv)
## NFS
<https://www.techrepublic.com/article/kerberos-authentication-with-nfsv4/>
```bash
mkdir -p /exports/srv
chmod 1777 /exports
mount --bind /srv /exports/srv
```
This bind-mounts /srv at /exports/srv: /srv can be accessed directly or via /exports/srv, and changes in one location are reflected in the other.
To make this persistent, add the mount command above to /etc/rc.d/rc.local or
some similar executed-at-boot script.
Next, edit `/etc/sysconfig/nfs` and enable the SECURE_NFS option:
```conf
SECURE_NFS="yes"
```
Then edit `/etc/exports`. The “/exports” entry has the “fsid=0” option, which
tells NFS that this is the “root” export.
```fstab
/exports gss/krb5(rw,sync,fsid=0,insecure,no_subtree_check)
/exports/srv gss/krb5(rw,sync,nohide,insecure,no_subtree_check)
```
Create the server principal for the NFS server and add it to the keytab file on
the server using kadmin (usually /etc/krb5.keytab):
```bash
kadmin.local
kadmin.local: addprinc -randkey nfs/nfsserver.domain.com
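# NOTE: des-cbc-crc is a legacy single-DES enctype; modern krb5 builds have
# removed DES support, so on current systems drop '-e des-cbc-crc:normal'
# and let ktadd use the default enctypes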
kadmin.local: ktadd -e des-cbc-crc:normal -k /etc/krb5.keytab nfs/nfsserver.domain.com
kadmin.local: quit
```
Edit `/etc/idmapd.conf` and make sure the Nobody-User and Nobody-Group options
are correct (i.e. on Red Hat Enterprise Linux and Fedora, use the “nfsnobody”
user and group, other distributions may just use “nobody”)
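For reference, the relevant stanza (a sketch assuming a Red Hat style distro):

```conf
# /etc/idmapd.conf
[Mapping]
Nobody-User = nfsnobody
Nobody-Group = nfsnobody
```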
```bash
systemctl restart nfs rpcidmapd
# on newer Fedora/RHEL releases the units are:
# systemctl restart nfs-server nfs-idmapd
```
Open TCP port 2049 for use with NFSv4:
```bash
firewall-cmd --add-service=nfs
firewall-cmd --add-service=nfs --permanent
firewall-cmd --reload
```
## Tips
@@ -147,6 +213,18 @@ reboot
- `ipa passwd <user>`
- `kinit <user>`
### Adding a Smart Card Certificate
1. Login to the UI as admin
2. Navigate to your user
3. Actions -> New Certificate
4. Generate a CSR with your yubikey (see the `ykman` sketch after this list)
5. Paste the CSR into the CSR field
6. Generate
7. Download the certificate from the user page
8. Import the certificate into slot 9a for your yubikey
9. `kinit -X X509_user_identity='PKCS11:opensc-pkcs11.so' idm_user`
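A minimal sketch of steps 4 and 8 with `ykman` (the algorithm, subject, and
file names here are assumptions; adjust for your setup):

```bash
# Generate a new key in slot 9a and save its public key
ykman piv keys generate --algorithm ECCP256 9a pubkey.pem
# Build a CSR for that key
ykman piv certificates request --subject "CN=idm_user" 9a pubkey.pem user.csr
# After FreeIPA issues the certificate, import it into slot 9a
ykman piv certificates import 9a user.pem
```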
## PIV
### Sources
@@ -249,36 +327,34 @@ hostnamectl set-hostname client.reese.reeselink.com
# OPTIONAL: You need to leave any existing AD realm before joining a new one
# realm leave <some-realm>
# Install pcscd
dnf install pcsc-lite opensc
# Start the pcscd server
systemctl enable --now pcscd
# Update client's dns server to use freeipa
vim /etc/resolv.conf
nameserver 192.168.122.130
# This should populate /etc/krb5.conf and /etc/sssd/sssd.conf
realm join -U admin freeipa.reese.reeselink.com -v
# With kinit
kinit -X X509_user_identity='PKCS11:opensc-pkcs11.so' idm_user
# With sssctl
dnf install -y sssd-tools
cp /etc/ipa/ca.crt /etc/sssd/pki/sssd_auth_ca_db.pem
chmod 600 /etc/ipa/ca.crt /etc/sssd/pki/sssd_auth_ca_db.pem
authselect enable-feature with-smartcard
# required: authselect enable-feature with-smartcard-required
# lock on remove: authselect enable-feature with-smartcard-lock-on-removal
```
IMPORTANT: set `pam_cert_auth = True` in the `[pam]` section of `/etc/sssd/sssd.conf`.
```bash
systemctl restart sssd
sssctl user-checks -s gdm-smartcard "ducoterra" -a auth
```
@@ -389,3 +465,11 @@ vim /etc/krb5.conf
- Log in with your user: `kinit <user>`
- List your tickets: `klist`
## Troubleshooting
### Changing IP address
Changing the IP address of a freeipa server can break dnssec: lookups start
returning "servfail". The quick fix is to edit
`/etc/named/ipa-options-ext.conf` and set `dnssec-validation no;`.
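Spelled out, the edit is:

```conf
# /etc/named/ipa-options-ext.conf
dnssec-validation no;
```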

active/software_gpg/gpg.md Normal file
@@ -0,0 +1,221 @@
# GPG
- [GPG](#gpg)
- [Searching for GPG Keys](#searching-for-gpg-keys)
- [Importing GPG Keys](#importing-gpg-keys)
- [Generate GPG Keys](#generate-gpg-keys)
- [Change Key Password](#change-key-password)
- [Renewing GPG Keys](#renewing-gpg-keys)
- [Export GPG Keys](#export-gpg-keys)
- [GPG Key Servers](#gpg-key-servers)
- [Delete GPG Keys](#delete-gpg-keys)
- [Using GPG keys](#using-gpg-keys)
- [Signing Files](#signing-files)
- [Encrypting Files](#encrypting-files)
- [Yubikey](#yubikey)
- [Linux Apps](#linux-apps)
- [Evolution Email](#evolution-email)
- [Android Apps](#android-apps)
- [OpenKeychain](#openkeychain)
- [Fair Email](#fair-email)
- [Troubleshooting](#troubleshooting)
## Searching for GPG Keys
I publish all my keys to <https://keys.openpgp.org>
```bash
# Search for an arbitrary user's key
gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys <email>
```
## Importing GPG Keys
```bash
# First, locate a key
gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys git@ducoterra.net
# Or import a key file
gpg --import keys/git_ducoterra_net.pub
# Sign the key with your own if you trust it
gpg -u 7FC1B29700114F4FC589E7065FDDCFA544D77B8C --sign-key git@ducoterra.net
# Then set the trust of the key
# full == I trust other keys signed by this key
# undefined == I'm choosing to defer to later
# never == I don't trust this key
gpg --quick-set-ownertrust git@ducoterra.net full
```
## Generate GPG Keys
```bash
# Make sure you have pinentry installed
dnf install pinentry
# Generate the key. The defaults should be good enough.
gpg --full-generate-key
# Verify your key was created
gpg --list-secret-keys
# Edit a key in your keyring
gpg --edit-key <id>
```
## Change Key Password
```bash
# You can see all the --edit-key options with `man gpg` and search for '--edit-key'
# You can also type "?" to see help
gpg --edit-key 7FC1B29700114F4FC589E7065FDDCFA544D77B8C
> passwd
> quit
```
## Renewing GPG Keys
You should set an expiration for your keys. You can extend that expiration (or
set it on existing keys) with:
```bash
# Note 2y == "expire 2 years from now"
# You can also set '0' for no expiration, or use 'd' for days and 'w' for weeks
gpg --quick-set-expire <key id> 2y
# Don't forget to republish your keys with new expirations
gpg --keyserver https://keys.openpgp.org --send-keys <key id>
```
## Export GPG Keys
```bash
# Export your public key in ascii format
gpg -o keys/git-ducoterra-net.gpg --export -a 'git@ducoterra.net'
# Export your private key (careful with this one)
gpg -o git-ducoterra-net.key --export-secret-keys -a 'git@ducoterra.net'
```
## GPG Key Servers
Edit `~/.gnupg/gpg.conf` and add `keyserver hkps://keys.openpgp.org`
```bash
# Sync keys with keyserver
gpg --refresh-keys
# Search for a user's key
gpg --auto-key-locate hkps://keys.openpgp.org --locate-keys git@ducoterra.net
# Export your public key
gpg --export -a 'git@ducoterra.net' > keys/git_ducoterra_net.pub
# Inspect a public key with
gpg --show-key keys/git_ducoterra_net.pub
# Upload a key to a keyserver
# NOTE: if you upload your key to keys.openpgp.org with this command, the email
# won't be searchable. You'll need to use the upload page
# (https://keys.openpgp.org/upload) and upload the key file generated above
# instead. You'll need to verify your email after upload for it to be searchable.
gpg --keyserver https://keys.openpgp.org --send-keys <key id>
```
## Delete GPG Keys
```bash
# Delete a public key
gpg --delete-keys <email>
# Delete a secret key
# Note, you'll also need to delete the public key after this command
gpg --delete-secret-keys <email>
```
## Using GPG keys
### Signing Files
```bash
# -s --sign
# -a --armor
# -u --local-user
# -e --encrypt
# -b --detach-sign
# -o --output
# Sign a file and compress it. Output will be binary
gpg -u 7FC1B29700114F4FC589E7065FDDCFA544D77B8C -o README.sig -s README.md
# Decompress and verify the signed file
gpg --output README.md --decrypt README.sig
# Sign a file without compressing it. Useful for serving/sending signed documents without requiring decompression
gpg -u 7FC1B29700114F4FC589E7065FDDCFA544D77B8C --clearsign README.md
# Verify the document (ignore the WARNING about detached signature)
gpg --verify README.md.asc
# Create a detached signature. The most practical option since you don't need to modify the original file.
gpg -u 7FC1B29700114F4FC589E7065FDDCFA544D77B8C -o README.md.sig -b README.md
# Verify the detached signature
gpg --verify README.md.sig README.md
```
### Encrypting Files
```bash
# -s --sign
# -a --armor
# -u --local-user
# -e --encrypt
# Encrypt a file with someone's public key
gpg -o README.md.gpg -e --recipient git@ducoterra.net README.md
# Decrypt the file if you have the private key
gpg -o README.md --decrypt README.md.gpg
# Encrypt with a password
gpg -o README.md.gpg --symmetric README.md
# Decrypt with a password
gpg --decrypt README.md.gpg
```
## Yubikey
See [Yubikey Notes](/active/device_yubikey/yubikey.md#gpg)
## Linux Apps
### Evolution Email
1. Edit -> Preferences -> Double click the account with a GPG key -> Security ->
OpenPGP Key ID
2. Always sign outgoing messages
3. Advanced Options -> Always trust keys in my keyring when encrypting
## Android Apps
### OpenKeychain
### Fair Email
## Troubleshooting
"error receiving key from agent: No such file or directory - skipped"
"error obtaining lock... process is in use by..."
In general, the easiest way to fix gpg problems is by killing and restarting the agent.
```bash
gpgconf --kill gpg-agent
gpgconf --reload gpg-agent
```

active/software_k0s/k0s.md Normal file
@@ -0,0 +1,193 @@
# K0s
- [K0s](#k0s)
- [Install Single Node Cluster](#install-single-node-cluster)
- [Install Multi Node Cluster](#install-multi-node-cluster)
- [Uninstall](#uninstall)
- [Install Metallb](#install-metallb)
- [Uninstall Metallb](#uninstall-metallb)
- [Install OpenEBS](#install-openebs)
## Install Single Node Cluster
<https://docs.k0sproject.io/v0.11.0/k0s-single-node/>
```bash
# Trust traffic on podCIDR and serviceCIDR subnets
firewall-cmd --permanent --zone=trusted \
--add-source=10.244.0.0/16 \
--add-source=10.96.0.0/12
# Set default zone to drop packets
firewall-cmd --set-default-zone=drop
# Allow k0s ports
firewall-cmd --permanent --zone=drop \
--add-port=22/tcp \
--add-port=6443/tcp \
--add-port=179/tcp \
--add-port=4789/udp \
--add-port=10250/tcp \
--add-port=9443/tcp \
--add-port=8132/tcp \
--add-port=112/tcp
# Apply firewall
firewall-cmd --reload
# Install k0s cli
curl -sSLf https://get.k0s.sh | sudo sh
# Setup the config
k0s config create > k0s.yaml
# Install single node cluster controller/node
k0s install controller -c k0s.yaml --enable-worker --no-taints
# Start and enable the service
systemctl enable --now k0scontroller
# Enable bash completion
echo 'source <(k0s completion bash)' >>~/.bashrc
source ~/.bashrc
# Make an admin user
mkdir ~/.kube
k0s kubeconfig create --groups "system:masters" admin > ~/.kube/config
# Remove the taint that prevents scheduling on the controller
kubectl edit node
```
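If you didn't pass `--no-taints`, a one-liner alternative to `kubectl edit
node` (the taint key is an assumption; check `kubectl describe node` first):

```bash
kubectl taint node <node-name> node-role.kubernetes.io/master:NoSchedule-
```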
## Install Multi Node Cluster
<https://docs.k0sproject.io/v0.11.0/k0s-multi-node/>
Install the controller on the controller machine
```bash
# Set default zone to drop packets
firewall-cmd --set-default-zone=drop
# Allow k0s ports
firewall-cmd --permanent --zone=drop \
--add-port=22/tcp \
--add-port=6443/tcp \
--add-port=179/tcp \
--add-port=4789/udp \
--add-port=10250/tcp \
--add-port=9443/tcp \
--add-port=8132/tcp \
--add-port=112/tcp
# Apply firewall
firewall-cmd --reload
# Install k0s cli
curl -sSLf https://get.k0s.sh | sudo sh
# Save default config
k0s config create > k0s.yaml
# Install the controller
k0s install controller
# Enable the controller
systemctl enable --now k0scontroller
# Enable bash completion
echo 'source <(k0s completion bash)' >>~/.bashrc
source ~/.bashrc
# Make an admin user (scp ~/.kube/config to your operator machine)
# kubectl config set-context --current --namespace kube-system
mkdir ~/.kube
k0s kubeconfig create --groups "system:masters" admin > ~/.kube/config
# Generate a worker join token
k0s token create --role=worker > worker0-token
```
Now on the worker machine, install the worker
```bash
# Trust traffic on podCIDR and serviceCIDR subnets
firewall-cmd --permanent --zone=trusted \
--add-source=10.244.0.0/16 \
--add-source=10.96.0.0/12
# Apply firewall
firewall-cmd --reload
# On the operator, copy the token file from the controller to the worker
scp vm-k0s-controller:worker0-token vm-k0s-worker:token-file
# Install k0s cli
curl -sSLf https://get.k0s.sh | sudo sh
# Join the worker
k0s install worker --token-file token-file
# Start the service
systemctl enable --now k0sworker
# Enable bash completion
echo 'source <(k0s completion bash)' >>~/.bashrc
source ~/.bashrc
```
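From the operator machine, a quick health check (assuming the kubeconfig
created on the controller was copied over):

```bash
kubectl get nodes -o wide
# the worker should go Ready once the CNI is up
```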
## Uninstall
```bash
systemctl stop k0scontroller
k0s reset
reboot
```
## Install Metallb
<https://docs.k0sproject.io/v1.26.0+k0s.0/examples/metallb-loadbalancer/>
1. Create a VLAN with a dedicated subnet for Metallb. Disable DHCP.
2. Attach this new VLAN to your worker nodes
3. Assign the worker nodes an address within the created network.
4. Install Metallb. Check `active/software_k0s/metallb-address-pool.yaml` before proceeding.
```bash
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.15.2/config/manifests/metallb-native.yaml
kubectl apply -f active/software_k0s/metallb-address-pool.yaml
```
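To verify the pool works, a hedged test Service (the name and selector are
assumptions; any Deployment labeled `app: lb-test` will do):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: lb-test
spec:
  type: LoadBalancer
  selector:
    app: lb-test
  ports:
  - port: 80
    targetPort: 80
```

`kubectl get svc lb-test` should then show an `EXTERNAL-IP` from the
192.168.123.100-254 range.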
### Uninstall Metallb
```bash
kubectl delete -f active/software_k0s/metallb-address-pool.yaml
kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.15.2/config/manifests/metallb-native.yaml
```
## Install OpenEBS
<https://docs.k0sproject.io/stable/examples/openebs/>
Add the openebs extension
```yaml
extensions:
helm:
repositories:
- name: openebs-internal
url: https://openebs.github.io/charts
charts:
- name: openebs
chartname: openebs-internal/openebs
version: "3.9.0"
namespace: openebs
order: 1
values: |
localprovisioner:
hostpathClass:
enabled: true
isDefaultClass: false
```
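Once the controller restarts with this config, a quick check (class names per
the upstream chart defaults):

```bash
kubectl get storageclass
# expect openebs-hostpath (and openebs-device) to be listed
```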

active/software_k0s/metallb-address-pool.yaml Normal file

@@ -0,0 +1,16 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: first-pool
namespace: metallb-system
spec:
addresses:
- 192.168.123.100-192.168.123.254
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: default
namespace: metallb-system


@@ -0,0 +1,54 @@
name = "fedora-42-base"
description = "Fedora 42 Base Installation"
version = "0.0.1"
distro = "fedora-42"
modules = []
groups = []
[customizations]
hostname = "f42-base"
[[customizations.disk.partitions]]
type = "btrfs"
minsize = "32 GiB"
[[customizations.disk.partitions.subvolumes]]
name = "root"
mountpoint = "/"
[[customizations.disk.partitions.subvolumes]]
name = "home"
mountpoint = "/home"
[customizations.timezone]
timezone = "America/New_York"
[[customizations.sshkey]]
user = "root"
key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGQa781Qj8mNlUdRquFFqg0O2ornG9SBHe705y4+1vPI ssh@ducoterra.net"
[customizations.firewall.services]
enabled = ["ssh"]
[customizations.services]
enabled = ["sshd"]
[[packages]]
name = "bash-completion"
version = "*"
[[packages]]
name = "tmux"
version = "*"
[[packages]]
name = "openssh-server"
version = "*"
[[packages]]
name = "vim"
version = "*"
[[packages]]
name = "git"
version = "*"
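A typical osbuild-composer flow with a blueprint like this (the file name is
an assumption):

```bash
# Push the blueprint and start a qcow2 build
composer-cli blueprints push fedora-42-base.toml
composer-cli compose start fedora-42-base qcow2
# Watch progress and fetch the finished image
composer-cli compose status
composer-cli compose image <build-uuid>
```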


@@ -0,0 +1,77 @@
name = "fedora-43-base"
description = "Fedora 43 Base Installation"
version = "0.0.1"
distro = "fedora-43"
modules = []
groups = []
[customizations]
hostname = "f43-base"
[[customizations.disk.partitions]]
type = "btrfs"
minsize = "32 GiB"
[[customizations.disk.partitions.subvolumes]]
name = "root"
mountpoint = "/"
[[customizations.disk.partitions.subvolumes]]
name = "home"
mountpoint = "/home"
[customizations.timezone]
timezone = "America/New_York"
[[customizations.user]]
name = "ducoterra"
password = "$6$QqOw6ktp6aiPy5kX$cpN.oar4CiofH0PpxyveJgkjsRFGnZ5ykOX/50DcJyU3hZFxc5R3SASemNW6m3jceLGgZrQHyALQl8SgtcNO90"
key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGQa781Qj8mNlUdRquFFqg0O2ornG9SBHe705y4+1vPI ssh@ducoterra.net"
home = "/home/ducoterra/"
shell = "/usr/bin/bash"
groups = ["wheel"]
uid = 1000
[customizations.firewall.services]
enabled = ["ssh"]
[customizations.services]
enabled = ["sshd"]
[[packages]]
name = "bash-completion"
version = "*"
[[packages]]
name = "tmux"
version = "*"
[[packages]]
name = "openssh-server"
version = "*"
[[packages]]
name = "vim"
version = "*"
[[packages]]
name = "git"
version = "*"
[[customizations.files]]
path = "/root/.inputrc"
mode = "0644"
user = "root"
group = "root"
data = """
"\\C-h": backward-kill-word
"""
[[customizations.files]]
path = "/home/ducoterra/.inputrc"
mode = "0644"
user = "root"
group = "root"
data = """
"\\C-h": backward-kill-word
"""
