moving everything to active or retired vs incubating and graduated
All checks were successful
Reese's Arch Toolbox / build-and-push-arch-toolbox (push) Successful in 14s

This commit is contained in:
2025-04-19 18:46:40 -04:00
parent 6e393d90ee
commit ef9104c796
234 changed files with 456 additions and 244 deletions

View File

@@ -0,0 +1,25 @@
alex alex-wordpress Bound pvc-0a8da478-b04c-46e7-a940-40b782b89892 10Gi RWO zfs-iscsi-enc0 <unset> 245d
alex data-alex-wordpress-mariadb-0 Bound pvc-1d8aeac7-a441-4059-9607-ee53d2344e41 8Gi RWO zfs-iscsi-enc0 <unset> 245d
courtniecraft courtniecraft Bound pvc-76f21642-5c85-4c0f-b406-42b4f8d65509 32Gi RWO zfs-nfs-enc1 <unset> 236d
gitea-staging data-gitea-postgresql-0 Bound pvc-775359a0-7a83-4ddd-8113-d1535a2fdbd3 10Gi RWO zfs-iscsi-enc1 <unset> 164d
gitea-staging data-gitea-staging-0 Bound pvc-4407c174-1778-4ade-9c1d-695d5c37e278 10Gi RWO zfs-iscsi-enc0 <unset> 164d
gitea-staging redis-data-gitea-redis-cluster-0 Bound pvc-f2f2c4dd-65e7-4d20-afe3-6b7938dda419 8Gi RWO zfs-iscsi-enc1 <unset> 164d
gitea-staging redis-data-gitea-redis-cluster-1 Bound pvc-6149eb96-70c5-4d1f-9bb4-5169e22030ce 8Gi RWO zfs-iscsi-enc1 <unset> 164d
gitea-staging redis-data-gitea-redis-cluster-2 Bound pvc-bdacee52-e5dd-43f9-9da2-679ccff183bf 8Gi RWO zfs-iscsi-enc1 <unset> 164d
gitea data-gitea-0 Bound pvc-d5090258-cf20-4f2e-a5cf-330ac00d0049 10Gi RWO zfs-iscsi-enc0 <unset> 471d
gitea data-gitea-postgresql-0 Bound pvc-1e39ff4f-35ab-44a5-bf58-1a23c0639ef1 10Gi RWO zfs-iscsi-enc1 <unset> 458d
gitea gitea-shared-storage Bound pvc-44d60e8a-2d69-430e-ba4d-aeede0c1ab0a 10Gi RWO zfs-iscsi-enc0 <unset> 299d
gitea redis-data-gitea-redis-cluster-0 Bound pvc-6b5c9611-fc03-431d-8bf2-9a0d3c90ef5a 8Gi RWO zfs-iscsi-enc1 <unset> 14d
gitea redis-data-gitea-redis-cluster-1 Bound pvc-5e01df53-ffd1-4fb6-a58e-ac99968a1a2c 8Gi RWO zfs-iscsi-enc1 <unset> 14d
gitea redis-data-gitea-redis-cluster-2 Bound pvc-97ea5dc3-e86a-4582-a8f7-60ed4545ec51 8Gi RWO zfs-iscsi-enc1 <unset> 14d
grafana grafana-pvc Bound pvc-624017d3-7b0c-44f7-bf9f-a6a74515526f 1Gi RWO zfs-iscsi-enc1 <unset> 171d
jellyfin jellyfin-cache Bound pvc-6a7fde87-39da-4cd7-a36d-127655131c10 128Gi RWO zfs-iscsi-enc1 <unset> 389d
jellyfin jellyfin-config Bound pvc-5938d42c-bdc4-40b4-9396-c3f3f91b39cc 8Gi RWO zfs-iscsi-enc0 <unset> 389d
modcraft modcraft Bound pvc-3bf29bfa-db3b-4c3d-8cd6-2044365f4672 32Gi RWX zfs-nfs-enc1 <unset> 263d
nextcloud nextcloud-data Bound pvc-6eef3977-cacb-4d2b-b9a5-005a2ce00717 2Ti RWO zfs-iscsi-enc0 <unset> 472d
nextcloud nextcloud-html-iops Bound pvc-68e20a7f-eddf-43f3-80fc-f007dbb4e1c1 16Gi RWO zfs-iscsi-enc1 <unset> 438d
nextcloud nextcloud-postgres-iops Bound pvc-462f4c54-e2d5-4380-b07e-2bdc5a019e5a 32Gi RWO zfs-iscsi-enc1 <unset> 438d
nextcloud nextcloud-redis-iops Bound pvc-30ba95ad-9798-46f3-8d08-7263e68e6c9f 32Gi RWO zfs-nfs-enc1 <unset> 236d
nimcraft nimcraft Bound pvc-ccaace81-bd69-4441-8de1-3b2b24baa7af 32Gi RWO zfs-nfs-enc1 <unset> 265d
snapdrop snapdrop-config Bound pvc-15a6e867-1d47-4a1d-a8e5-f26974a71381 8Gi RWO zfs-iscsi-enc0 <unset> 390d
testcraft testcraft Bound pvc-f88c66fa-feb2-4ff5-85e9-d1bbafa3cc78 32Gi RWO zfs-nfs-enc1 <unset> 264d

View File

@@ -0,0 +1,6 @@
# Democratic CSI
## Retirement
This project has been deprecated in favor of local storage provided by rancher. Local storage is
much better for single node clusters (which is all I run right now) since it's far more performant.

View File

@@ -0,0 +1,69 @@
# democratic-csi Helm values: generic ZFS-over-SSH NFS driver backed by the enc1 pool.
csiDriver:
  name: "driveripper.zfs-gen-nfs-enc1"

storageClasses:
  - name: zfs-gen-nfs-enc1
    defaultClass: false
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      fsType: nfs
    mountOptions:
      - async
      - noatime
    # Empty secret refs: no per-volume secrets are injected for any CSI operation.
    secrets:
      provisioner-secret:
      controller-publish-secret:
      node-stage-secret:
      node-publish-secret:
      controller-expand-secret:

volumeSnapshotClasses: []

driver:
  config:
    driver: zfs-generic-nfs
    sshConnection:
      host: driveripper.reeseapps.com
      port: 22
      username: democratic
      privateKey: ""  # NOTE(review): empty — presumably injected at deploy time; confirm
    zfs:
      cli:
        sudoEnabled: true
        paths:
          zfs: /usr/sbin/zfs
          zpool: /usr/sbin/zpool
          sudo: /usr/bin/sudo
          chroot: /usr/sbin/chroot
      datasetProperties:
        "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      datasetParentName: enc1/dcsi/nfs
      # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
      # they may be siblings, but neither should be nested in the other
      # do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
      detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
      datasetEnableQuotas: true
      datasetEnableReservation: false
      datasetPermissionsMode: "0777"
      datasetPermissionsUser: 0
      datasetPermissionsGroup: 0
      # datasetPermissionsAcls:
      # - "-m everyone@:full_set:allow"
      #- "-m u:kube:full_set:allow"
    nfs:
      # https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
      # https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
      shareStrategy: "setDatasetProperties"
      shareStrategySetDatasetProperties:
        properties:
          #sharenfs: "rw,no_subtree_check,no_root_squash"
          sharenfs: "on"
      # share: ""
      # NOTE(review): mount host domain (reeselink) differs from the SSH host (reeseapps) — confirm intentional
      shareHost: "driveripper.reeselink.com"

View File

@@ -0,0 +1,85 @@
# democratic-csi Helm values: TrueNAS (FreeNAS) API iSCSI driver backed by the enc0 pool.
csiDriver:
  name: "driveripper.zfs-iscsi-enc0"

# add note here about volume expansion requirements
storageClasses:
  - name: zfs-iscsi-enc0
    defaultClass: true  # cluster default StorageClass
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      # for block-based storage can be ext3, ext4, xfs
      fsType: xfs
    mountOptions: []
    # Empty secret refs: no per-volume secrets are injected for any CSI operation.
    secrets:
      provisioner-secret:
      controller-publish-secret:
      node-stage-secret:
      node-publish-secret:
      controller-expand-secret:

volumeSnapshotClasses: []

driver:
  config:
    driver: freenas-api-iscsi
    instance_id:
    httpConnection:
      protocol: https
      host: driveripper.reeseapps.com
      port: 443
      apiKey: ""  # NOTE(review): empty — presumably injected at deploy time; confirm
      allowInsecure: false
      apiVersion: 2
    zfs:
      datasetProperties:
        "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      datasetParentName: enc0/dcsi/apps
      # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
      # they may be siblings, but neither should be nested in the other
      detachedSnapshotsDatasetParentName: enc0/dcsi/snaps
      zvolCompression:
      # "" (inherit), on, off, verify
      zvolDedup:
      zvolEnableReservation: false
      # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
      zvolBlocksize:
    iscsi:
      targetPortal: "democratic-csi-server.reeselink.com"
      # for multipath
      targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
      # leave empty to omit usage of -I with iscsiadm
      interface:
      # MUST ensure uniqueness
      # full iqn limit is 223 bytes, plan accordingly
      # default is "{{ name }}"
      nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      namePrefix: "dcsi-"
      nameSuffix: "-enc0"
      # add as many as needed
      targetGroups:
        # get the correct ID from the "portal" section in the UI
        - targetGroupPortalGroup: 7
          # get the correct ID from the "initiators" section in the UI
          targetGroupInitiatorGroup: 1
          # None, CHAP, or CHAP Mutual
          targetGroupAuthType: None
          # get the correct ID from the "Authorized Access" section of the UI
          # only required if using Chap
          targetGroupAuthGroup:
      extentCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      extentInsecureTpc: true
      extentXenCompat: false
      extentDisablePhysicalBlocksize: true
      # 512, 1024, 2048, or 4096,
      extentBlocksize: 512
      # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
      extentRpm: "SSD"
      # 0-100 (0 == ignore)
      extentAvailThreshold: 0

View File

@@ -0,0 +1,84 @@
# democratic-csi Helm values: TrueNAS (FreeNAS) API iSCSI driver backed by the enc1 pool.
csiDriver:
  name: "driveripper.zfs-iscsi-enc1"

# add note here about volume expansion requirements
storageClasses:
  - name: zfs-iscsi-enc1
    defaultClass: false
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      # for block-based storage can be ext3, ext4, xfs
      fsType: xfs
    mountOptions: []
    # Empty secret refs: no per-volume secrets are injected for any CSI operation.
    secrets:
      provisioner-secret:
      controller-publish-secret:
      node-stage-secret:
      node-publish-secret:
      controller-expand-secret:

volumeSnapshotClasses: []

driver:
  config:
    driver: freenas-api-iscsi
    instance_id:
    httpConnection:
      protocol: https
      host: driveripper.reeseapps.com
      port: 443
      apiKey: ""  # NOTE(review): empty — presumably injected at deploy time; confirm
      allowInsecure: false
      # Added to match the enc0 config, which pins the TrueNAS API version explicitly.
      apiVersion: 2
    zfs:
      datasetProperties:
        "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      datasetParentName: enc1/dcsi/apps
      # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
      # they may be siblings, but neither should be nested in the other
      detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
      zvolCompression:
      # "" (inherit), on, off, verify
      zvolDedup:
      zvolEnableReservation: false
      # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
      zvolBlocksize:
    iscsi:
      targetPortal: "democratic-csi-server.reeselink.com"
      # for multipath
      targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
      # leave empty to omit usage of -I with iscsiadm
      interface:
      # MUST ensure uniqueness
      # full iqn limit is 223 bytes, plan accordingly
      # default is "{{ name }}"
      nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      namePrefix: "dcsi-"
      nameSuffix: "-enc1"
      # add as many as needed
      targetGroups:
        # get the correct ID from the "portal" section in the UI
        - targetGroupPortalGroup: 7
          # get the correct ID from the "initiators" section in the UI
          targetGroupInitiatorGroup: 1
          # None, CHAP, or CHAP Mutual
          targetGroupAuthType: None
          # get the correct ID from the "Authorized Access" section of the UI
          # only required if using Chap
          targetGroupAuthGroup:
      extentCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      extentInsecureTpc: true
      extentXenCompat: false
      extentDisablePhysicalBlocksize: true
      # 512, 1024, 2048, or 4096,
      extentBlocksize: 512
      # "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
      extentRpm: "SSD"
      # 0-100 (0 == ignore)
      extentAvailThreshold: 0

View File

@@ -0,0 +1,53 @@
# democratic-csi Helm values: TrueNAS (FreeNAS) API NFS driver backed by the enc1 pool.
csiDriver:
  name: "driveripper.zfs-nfs-enc1"

storageClasses:
  - name: zfs-nfs-enc1
    defaultClass: false
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    allowVolumeExpansion: true
    parameters:
      fsType: nfs
    mountOptions:
      - async
      - noatime
    # Empty secret refs: no per-volume secrets are injected for any CSI operation.
    secrets:
      provisioner-secret:
      controller-publish-secret:
      node-stage-secret:
      node-publish-secret:
      controller-expand-secret:

volumeSnapshotClasses: []

driver:
  config:
    driver: freenas-api-nfs
    instance_id:
    httpConnection:
      protocol: https
      host: driveripper.reeseapps.com
      port: 443
      apiKey: ""  # NOTE(review): empty — presumably injected at deploy time; confirm
      # NOTE(review): true here but false in both iSCSI configs — confirm intentional
      allowInsecure: true
    zfs:
      datasetProperties:
        "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
      datasetParentName: enc1/dcsi/nfs
      # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
      # they may be siblings, but neither should be nested in the other
      detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
      datasetEnableQuotas: true
      datasetEnableReservation: false
      datasetPermissionsMode: "0777"
      datasetPermissionsUser: 0
      datasetPermissionsGroup: 0
    nfs:
      shareHost: democratic-csi-server.reeselink.com
      shareAlldirs: false
      shareAllowedHosts: []
      shareAllowedNetworks:
        - "fd00:fd41:d0f1:1010::0/64"
      shareMaprootUser: root
      shareMaprootGroup: root
      shareMapallUser: ""
      shareMapallGroup: ""

View File

@@ -0,0 +1,2 @@
# Serve packaged Helm charts (plus index.yaml) as static content from nginx's default web root.
# NOTE(review): base image is unpinned ("nginx" == latest) — consider pinning a version tag.
FROM nginx
COPY charts /usr/share/nginx/html

View File

@@ -0,0 +1,14 @@
# Helm repository index — GENERATED by `helm repo index charts`; do not edit by hand.
apiVersion: v1
entries:
  repository:
    - apiVersion: v2
      appVersion: 1.16.0
      created: "2023-03-25T14:59:38.161362245-04:00"
      description: A Helm repository chart
      digest: 41013d6705233e1b686cfc9ed6a922f62c63e5ac4e8f1a7405ea1a9042d7b3ec
      name: repository
      type: application
      urls:
        - repository-0.1.0.tgz
      version: 0.1.0
generated: "2023-03-25T14:59:38.160918033-04:00"

View File

@@ -0,0 +1,8 @@
# Local build/push harness for the chart-repository nginx image.
version: '3'
services:
  repo:
    image: ducoterra/helm-repository:latest
    build: .
    ports:
      # Quoted to avoid YAML's sexagesimal/number parsing of host:container mappings.
      - "8080:80"

View File

@@ -0,0 +1,87 @@
# Personal Helm Repository
A helm repository is really, really simple. It's a static website that serves, at least,
a file called `index.yaml`. This file is auto-generated (see [Adding a chart](#adding-a-chart)
below) via helm commands.
Charts are added by copying their respective tgz archives into the webserver's content
directory. For example: /usr/share/nginx/html when using nginx.
Since charts are usually small you don't need a volume, you can copy the contents of your
charts directory into the webserver's docker image and serve a completely stateless repository.
You could potentially have a repository for each project or have an org-wide repository
that gets built automatically with something like gitlab/github pages.
## Getting started
Create a folder called "charts".
Run `helm repo index charts`. An `index.yaml` will be created. It shouldn't have anything
in it.
Now you can run `podman-compose build` to create the image. This will copy the charts
folder into an nginx container. `podman-compose push` will upload the container to the
registry.
## Deploying the Repository
We can use the local chart (for now) to deploy the chart image.
```bash
helm upgrade --install \
chart-repository \
./repository \
--namespace chart-repository \
--create-namespace
```
## Using the Repository
You can add the repository to your local helm client like any other:
```bash
helm repo add reeseapps https://charts.reeseapps.com
```
You can view the existing charts with
```bash
helm search repo reeseapps
```
Though nothing will show up right now...
## Adding a chart
We can add a chart by copying its .tgz package into `charts/`. For example:
```bash
helm package ./repository
mv repository-0.1.0.tgz charts/
```
Now recreate the index with
```bash
helm repo index charts
```
You should see a new entry in the index.yaml.
## Updating the image
Now that you have something in your index, we can update the image by following the
same process we used to deploy it:
```bash
podman-compose build
podman-compose push
helm upgrade --install \
chart-repository \
./repository \
--namespace chart-repository \
--create-namespace
```
Now run `helm repo update` and `helm search repo reeseapps`. You should see the new
chart added.

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
# Chart metadata for the self-hosting "repository" chart used to deploy this repo's nginx image.
apiVersion: v2
name: repository
description: A Helm repository chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,73 @@
# Helm template: Deployment + ClusterIP Service + TLS Ingress for the chart repository.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Release.Name }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ .Release.Name }}
  strategy:
    # Recreate: stop the old pod before starting the new one (no rolling update).
    type: Recreate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ .Release.Name }}
    spec:
      containers:
        - name: nginx
          # NOTE(review): ":latest" with Always pull means each restart picks up whatever
          # was pushed last — consider pinning a tag for reproducible deploys.
          image: ducoterra/helm-repository:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 80
              name: http
          resources:
            requests:
              memory: "1Gi"
              cpu: "1m"
            limits:
              memory: "1Gi"
              cpu: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: {{ .Release.Name }}
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: {{ .Release.Name }}
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ .Release.Name }}
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt
    # NOTE(review): this annotation is deprecated in favor of spec.ingressClassName
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.org/client-max-body-size: "0"
spec:
  rules:
    # Quoted so an empty or special-character domain value cannot break YAML parsing.
    - host: {{ .Values.domain | quote }}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {{ .Release.Name }}
                port:
                  name: http
  tls:
    - hosts:
        - {{ .Values.domain | quote }}
      secretName: {{ .Release.Name }}-tls-cert

View File

@@ -0,0 +1 @@
# Public hostname used as the Ingress host and TLS host by the chart's templates.
domain: charts.reeseapps.com

View File

@@ -0,0 +1,391 @@
# FreeIPA
- [FreeIPA](#freeipa)
- [Notes](#notes)
- [Quickstart Debugging Setup](#quickstart-debugging-setup)
- [Quickstart Production Setup](#quickstart-production-setup)
- [Tips](#tips)
- [Adding a user](#adding-a-user)
- [PIV](#piv)
- [Sources](#sources)
- [Set up PIV Auth on the Host where you Installed FreeIPA](#set-up-piv-auth-on-the-host-where-you-installed-freeipa)
- [Note for VMs](#note-for-vms)
- [Set up PIV](#set-up-piv)
- [Set up PIV on Another Host](#set-up-piv-on-another-host)
- [PIV Smart Card (Yubikey Manual) WIP](#piv-smart-card-yubikey-manual-wip)
- [PIV Smart Card (Taglio) WIP](#piv-smart-card-taglio-wip)
- [Finding devices in sysfs WIP](#finding-devices-in-sysfs-wip)
- [Finding p11 devices WIP](#finding-p11-devices-wip)
- [Arch Client WIP](#arch-client-wip)
An AD Server.
This guide assumes Fedora 40+.
## Notes
For yubikey manager appimage extend app not responding timeout to 60 seconds
```bash
gsettings set org.gnome.mutter check-alive-timeout 0
```
## Quickstart Debugging Setup
This sets up a freeipa server on RHEL 9 for debugging purposes. No DNS records required
and mostly for local use.
```bash
# Don't forget to install tmux
dnf install -y tmux
```
Install FreeIPA:
```bash
# Hostname needs to be an address that isn't managed by DNS (FreeIPA will check)
# We can get away with *.name.reeselink.com because "name.reeselink.com" doesn't resolve.
export HNAME="freeipa.reese.reeselink.com"
export IPS=$(hostname -I)
# Add "<ip address> freeipa.<unique_name>.reeselink.com" to the /etc/hosts
# Example: "10.3.128.130 freeipa.reese.reeselink.com" <- note, this address *cannot* already exist.
# This will allow us to access our AD server without DNS complication
for item in $IPS; do echo "$item $HNAME" >> /etc/hosts; done
# FreeIPA checks for this
hostnamectl set-hostname $HNAME --static
hostname $HNAME
# Turning off selinux isn't required, but is nice for debugging.
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=permissive/g' /etc/selinux/config
# Install FreeIPA with the dns packages. We *could* use this server as our DNS server if
# we wanted. This will prevent DCV from talking to our auth endpoint, however, so isn't
# recommended.
dnf install ipa-server-dns bind-dyndb-ldap -y
# IPA Server install
# Note - select defaults for almost everything except:
# 1. We want DNS, so say yes to that
# 2. We don't need to scan for additional zones
ipa-server-install --setup-dns
# Install flatpak
dnf install flatpak
flatpak remote-add --if-not-exists flathub https://dl.flathub.org/repo/flathub.flatpakrepo
flatpak install it.mijorus.gearlever
```
1. Download the yubikey manager at <https://www.yubico.com/support/download/yubikey-manager/>
2. Open it in Gear Lever and add to app menu
```bash
# Install pcscd
dnf install pcsc-lite opensc
# Start the pcscd server
systemctl enable --now pcscd
```
Now skip to [Get PIV Working](#piv)
## Quickstart Production Setup
<https://www.freeipa.org/page/Quick_Start_Guide>
- Set your hostname to your server's fqdn with `hostnamectl hostname freeipa.reeselink.com`
- Ensure you have a DNS entry pointing to your host
- Open ports:
```bash
firewall-cmd --add-service=freeipa-ldap --add-service=freeipa-ldaps
firewall-cmd --add-service=freeipa-ldap --add-service=freeipa-ldaps --permanent
```
- Set a permanent DNS resolver: `echo "nameserver 1.1.1.1" | sudo tee /etc/resolv.conf` (a plain `sudo echo ... > file` fails because the redirection runs as the unprivileged user)
- Disable NetworkManager DNS management
```bash
vim /etc/NetworkManager/NetworkManager.conf
[main]
dns=none
```
Note, if installing for local use only, set /etc/hosts and reply "yes" to configure dns.
```bash
vim /etc/hosts
192.168.122.100 freeipa.reeselink.com
```
- Restart NetworkManager: `systemctl restart NetworkManager`
- Ensure resolv.conf hasn't been repopulated: `cat /etc/resolv.conf`
- Install freeipa: `dnf install -y freeipa-server freeipa-server-dns`
- Install the server (mostly choose defaults and sane options): `ipa-server-install`
- Authenticate as admin: `kinit admin`
Now skip to [Get PIV Working](#piv)
## Tips
```bash
# Install gnome desktop on Fedora server
dnf group install gnome-desktop
systemctl enable gdm
reboot
# Install spice tools for clipboard sync
sudo dnf install spice-vdagent
reboot
```
## Adding a user
- `ipa user-add`
- `ipa passwd <user>`
- `kinit <user>`
## PIV
### Sources
Self signed piv cert generation
<https://developers.yubico.com/yubico-piv-tool/Actions/key_generation.html>
Explaining Yubikey key slots
<https://developers.yubico.com/PIV/Introduction/Certificate_slots.html>
Ultimate guide to creating a CA
<https://checkmk.com/linux-knowledge/creating-server-certificate>
CA Public directory
<https://nextcloud.reeseapps.com/s/twRAFeWJZKJ3Qw5>
Convert pem to der
<https://knowledge.digicert.com/solution/how-to-convert-a-certificate-into-the-appropriate-format>
Add smart card login to extended key use
<https://docs.openssl.org/master/man5/x509v3_config/#key-usage>
This should set up a CA and a signed cert for your yubikey.
<https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/managing_smart_card_authentication/configuring-idm-for-smart-card-auth_managing-smart-card-authentication#conf-idm-server-for-smart-card-auth_configuring-idm-for-smart-card-auth>
### Set up PIV Auth on the Host where you Installed FreeIPA
#### Note for VMs
"virt-manager USB redirection error: Device is in use by another application"
You won't be able to pass through a smart card without stopping pcscd on the host.
```bash
systemctl stop pcscd.socket && systemctl stop pcscd
```
#### Set up PIV
```bash
# copy the CA and run the ipa-advise script that sets up smartcard auth
cp /etc/ipa/ca.crt ca.crt
sudo -i
kinit admin
ipa-advise config-server-for-smart-card-auth > config-server-for-smart-card-auth.sh
chmod +x config-server-for-smart-card-auth.sh
./config-server-for-smart-card-auth.sh ca.crt
```
Allow smart card auth in browser with these changes `/etc/httpd/conf.d/ssl.conf`
```conf
...
SSLOCSPEnable off
...
SSLProtocol all -TLSv1 -TLSv1.1 -TLSv1.3
...
```
And restart httpd:
```bash
systemctl restart httpd
```
Then user -> actions -> new certificate. Paste in the yubikey CSR. Download then upload cert to the yubikey.
Auth with smart card
```bash
kinit -X X509_user_identity='PKCS11:opensc-pkcs11.so' idm_user
```
Or with pam
```bash
cp /etc/ipa/ca.crt /etc/sssd/pki/sssd_auth_ca_db.pem
dnf install -y sssd-tools
authselect enable-feature with-smartcard
# required: authselect enable-feature with-smartcard-required
# lock on remove: authselect enable-feature with-smartcard-lock-on-removal
# set "pam_cert_auth = True" in [pam] section of /etc/sssd/sssd.conf
systemctl restart sssd
sssctl user-checks -s gdm-smartcard "ducoterra" -a auth
```
### Set up PIV on Another Host
**NOTE**: Make sure, if you're joining with a host that already has a user with a username that
exists in your freeipa server, that **you set the UID in freeipa to match the UID on
the system**. Otherwise joining will cause all sorts of problems.
```bash
# This package is required to join a FreeIPA realm
# I think it helps collect and save the relevant data from the FreeIPA
# server when joining (server CA, NTP sync, SSH keys, etc.)
dnf install -y freeipa-client
hostnamectl set-hostname client.reese.reeselink.com
# OPTIONAL: You need to leave any existing AD realm before joining a new one
# realm leave <some-realm>
```
Add the freeipa server to our /etc/hosts so we don't need to set up DNS
```bash
vim /etc/hosts
`192.168.122.195 freeipa.reese.reeselink.com`
# This should populate /etc/krb5.conf and /etc/sssd/sssd.conf
realm join freeipa.reese.reeselink.com -v
# AD should be configured to create the user's home dir, but to be safe
export freeipa_user=ducoterra
mkdir /home/$freeipa_user
chown $freeipa_user:$freeipa_user /home/$freeipa_user
# Check login
su - $freeipa_user
# With kinit
kinit -X X509_user_identity='PKCS11:opensc-pkcs11.so' idm_user
# With sssctl
cp /etc/ipa/ca.crt /etc/sssd/pki/sssd_auth_ca_db.pem
dnf install -y sssd-tools
authselect enable-feature with-smartcard
# required: authselect enable-feature with-smartcard-required
# lock on remove: authselect enable-feature with-smartcard-lock-on-removal
# set "pam_cert_auth = True" in [pam] section of /etc/sssd/sssd.conf
systemctl restart sssd
sssctl user-checks -s gdm-smartcard "ducoterra" -a auth
```
### PIV Smart Card (Yubikey Manual) WIP
```bash
openssl req -x509 -config openssl-ca.cnf -days 3650 -newkey rsa:4096 -sha256 -nodes -out cacert.pem -outform PEM
touch index.txt
echo '01' > serial.txt
# Sign the csr generated in slot 9a on your yubikey
openssl ca -config openssl-ca.cnf -policy signing_policy -extensions signing_req -out reese-crt.pem -infiles reese-csr.pem
# Inspect
openssl x509 -in reese-crt.pem -text -noout
# Convert to microsoft format
openssl x509 -outform der -in reese-crt.pem -out reese-crt-der.cer
```
OCSP Server
<https://bhashineen.medium.com/create-your-own-ocsp-server-ffb212df8e63>
```bash
openssl req -new -nodes -out ocspSigning.csr -keyout ocspSigning.key
openssl ca -keyfile rootCA.key -cert rootCA.crt -in ocspSigning.csr -out ocspSigning.crt -config validation.conf
```
### PIV Smart Card (Taglio) WIP
<https://github.com/OpenSC/OpenSC/wiki/Using-pkcs11-tool-and-OpenSSL>
```bash
openssl genrsa -aes256 -out testkey.key 2048
openssl rsa -in testkey.key -pubout -out testkey-public.key
openssl req -new -key testkey.key -out testkey.csr
# Sign with CA
```
### Finding devices in sysfs WIP
```bash
# Try this
ls /dev/input/by-id
udevadm info --query=all --name='/dev/input/by-id/usb-Yubico_YubiKey_OTP+FIDO+CCID-event-kbd'
# Look for E: DEVPATH and put /sys in front of it
# For example:
cd /sys/devices/pci0000:00/0000:00:02.1/0000:02:00.0/usb1/1-3/1-3:1.0/0003:1050:0407.0004/input/input10/event6
# Or alternatively
find /sys -name 'removable'
cd /sys/devices/pci0000:00/0000:00:02.1/0000:02:00.0/usb1/1-3
```
### Finding p11 devices WIP
```bash
dnf install python3-pip python3-devel gcc
python3 -m venv venv
source venv/bin/activate
pip install -U pip
pip install python-pkcs11
export PKCS11_MODULE=/usr/lib64/p11-kit-proxy.so
```
## Arch Client WIP
- Install krb5: `pacman -S krb5`
- Edit /etc/krb5.conf to match your server
```conf
vim /etc/krb5.conf
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
default_realm = REESELINK.COM
dns_lookup_realm = false
dns_lookup_kdc = true
rdns = false
ticket_lifetime = 24h
forwardable = true
udp_preference_limit = 0
default_ccache_name = KEYRING:persistent:%{uid}
[realms]
REESELINK.COM = {
kdc = freeipa.reeselink.com:88
master_kdc = freeipa.reeselink.com:88
kpasswd_server = freeipa.reeselink.com:464
admin_server = freeipa.reeselink.com:749
default_domain = reeselink.com
pkinit_anchors = FILE:/var/lib/ipa-client/pki/kdc-ca-bundle.pem
pkinit_pool = FILE:/var/lib/ipa-client/pki/ca-bundle.pem
}
[domain_realm]
.reeselink.com = REESELINK.COM
reeselink.com = REESELINK.COM
freeipa.reeselink.com = REESELINK.COM
```
- Log in with your user: `kinit <user>`
- List your tickets: `klist`

View File

@@ -0,0 +1,22 @@
# Installs iperf3 on the "yellow" host and manages it as a systemd service.
- name: Install iperf3 service
  hosts: yellow
  vars_files:
    - vars.yaml
  tasks:
    - name: Ensure iperf3 is installed
      ansible.builtin.dnf:
        name:
          - iperf3
        state: present

    - name: Copy iperf3.service
      ansible.builtin.template:
        src: iperf3.service
        dest: /etc/systemd/system/iperf3.service
        owner: root
        group: root
        mode: '0644'

    # Task previously named "Reload iperf3 timer" — it restarts the *service*;
    # daemon_reload ensures systemd picks up the freshly copied unit file.
    - name: Restart and enable iperf3 service
      ansible.builtin.systemd_service:
        state: restarted
        daemon_reload: true
        name: iperf3.service
        enabled: true

View File

@@ -0,0 +1,7 @@
# Iperf3
## Install
```bash
ansible-playbook -i ansible/inventory.yaml active/systemd_iperf3/install_iperf3.yaml
```

View File

@@ -0,0 +1,9 @@
# Minimal systemd unit that runs an iperf3 server for network throughput testing.
[Unit]
Description=iperf3 server
After=syslog.target network.target auditd.service
# Runs /usr/bin/iperf3 -s in the foreground; no Restart= policy is configured.
[Service]
ExecStart=/usr/bin/iperf3 -s
# Start at boot in the default multi-user target once enabled.
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,26 @@
# IPv4 Proxy
**DEPRECATED** Replaced by [Caddy](/active/podman_caddy/caddy.md)
This project aims to serve those without an IPv6 ISP by forwarding IPv4 requests to the
correct destination. This is accomplished by SSL preread and port mapping. This service
is intended only for publicly accessible services.
## DDNS
This project pairs with the ddns service. Set that up first!
## Updating IPv4 Proxy Records
1. In `ddns` create a new record in the `reeseapps_record_template.json`
2. Apply the new record with ansible
3. Update `vars.yaml` in this project
4. Run the following ansible script:
```bash
ansible-playbook -i ansible/inventory.yaml active/systemd_ipv4-proxy/nginx.yaml
```
## Logging
You can tail all the nginx logs with `ssh yellow 'tail -f /var/log/nginx/*.log'`

View File

@@ -0,0 +1,53 @@
# Jinja-templated nginx stream (L4) proxy: SNI-based forwarding on 443 plus raw
# port forwarding for entries in stream_ports (rendered by the nginx.yaml playbook).
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;

worker_processes auto;

events {
    worker_connections 1024;
}

stream {
    log_format ssl '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward: $map_forward_ssl | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';
    log_format port '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward: $map_forward_port | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';

    # Map all SSL parsed server names to hosts
    map $ssl_preread_server_name $map_forward_ssl {
{% for item in stream_ssl %}
        {{ item.external.domain }} {{ item.internal.domain }}:{{ item.internal.port }};
{% endfor %}
    }

    # SNI-routed TLS passthrough on 443.
    server {
        access_log /var/log/nginx/nginx_stream_access.log ssl;
        error_log /var/log/nginx/nginx_stream_error.log warn;
        listen 443;
        proxy_pass $map_forward_ssl;
        ssl_preread on;
        proxy_socket_keepalive on;
        resolver 10.1.0.1;
    }

    # Map each external listen port to its internal host:port.
    map $server_port $map_forward_port {
{% for item in stream_ports %}
        {{ item.external }} {{ item.internal }};
{% endfor %}
    }

    # Raw TCP forwarding for the templated ports.
    # BUGFIX: removed stray "listen 443;" — it duplicated the ssl_preread server's
    # listen (startup conflict), and 443 has no entry in the $server_port map.
    server {
{% for item in stream_ports %}
        listen {{ item.external }};
{% endfor %}
        access_log /var/log/nginx/nginx_stream_access.log port;
        error_log /var/log/nginx/nginx_stream_error.log warn;
        proxy_pass $map_forward_port;
        proxy_socket_keepalive on;
        resolver 10.1.0.1;
    }
}

View File

@@ -0,0 +1,42 @@
# Renders the templated nginx.conf onto the "yellow" host, validates it, and
# (re)starts nginx. Config dirs are wiped first so removed entries don't linger.
- name: Update nginx stream configuration
  hosts: yellow
  vars_files:
    - vars.yaml
  tasks:
    # Task name previously mentioned certbot, which is not in the package list.
    - name: Ensure nginx and nginx-mod-stream are installed
      ansible.builtin.dnf:
        name:
          - nginx
          - nginx-mod-stream
        state: present

    - name: Remove http.d dir before repopulating
      ansible.builtin.file:
        path: /etc/nginx/http.d/
        state: absent

    - name: Remove stream.d dir before repopulating
      ansible.builtin.file:
        path: /etc/nginx/stream.d/
        state: absent

    - name: Create stream.d dir
      ansible.builtin.file:
        path: /etc/nginx/stream.d
        state: directory
        mode: '0755'

    - name: Template nginx.conf
      ansible.builtin.template:
        src: nginx.conf
        dest: /etc/nginx/nginx.conf
        owner: root
        group: root
        mode: '0644'

    # command (not shell) — no shell features needed; a non-zero exit aborts the play
    # before nginx is stopped, so a bad config never takes the proxy down.
    - name: Test nginx configuration
      ansible.builtin.command: /usr/sbin/nginx -t
      changed_when: false

    - name: Stop nginx service
      ansible.builtin.systemd_service:
        state: stopped
        name: nginx

    # Task previously named "Reload nginx service" — it actually starts nginx.
    - name: Start and enable nginx service
      ansible.builtin.systemd_service:
        state: started
        name: nginx
        enabled: true

View File

@@ -0,0 +1,49 @@
---
# Inputs for the nginx stream (TCP proxy) template.
#
# stream_ssl: SNI-routed TLS passthrough — the server name presented in the
# TLS ClientHello (external.domain) is forwarded, unterminated, to
# internal.domain:internal.port.
# NOTE(review): the nginx template only reads domain and port; "protocol"
# appears informational — confirm no other consumer before removing it.
stream_ssl:
  - external:
      domain: gitea.reeseapps.com
    internal:
      domain: ingress-nginx.reeseapps.com
      port: 443
      protocol: https
  - external:
      domain: nextcloud.reeseapps.com
    internal:
      domain: docker.reeselink.com
      port: 443
      protocol: https
  - external:
      domain: jellyfin.reeseapps.com
    internal:
      domain: ingress-nginx.reeseapps.com
      port: 443
      protocol: https
  - external:
      domain: snapdrop.reeseapps.com
    internal:
      domain: ingress-nginx.reeseapps.com
      port: 443
      protocol: https
  - external:
      domain: bitwarden.reeseapps.com
    internal:
      domain: ingress-nginx.reeseapps.com
      port: 443
      protocol: https
  - external:
      domain: ollama.reeseapps.com
    internal:
      domain: localai.reeselink.com
      port: 443
      protocol: https
  - external:
      domain: chatreesept.reeseapps.com
    internal:
      domain: localai.reeselink.com
      port: 443
      protocol: https
# stream_ports: plain TCP forwards — nginx listens on "external" and proxies
# to "internal". host:port values are quoted so the colon-bearing scalars can
# never be misread by a YAML parser; the parsed string value is unchanged.
stream_ports:
  - external: 2222
    internal: 'git.reeseapps.com:22'
  - external: 3478
    internal: 'nextcloud.reeselink.com:3478'

View File

@@ -0,0 +1,5 @@
# Open Voice OS
## Installation
<https://community.openconversational.ai/t/howto-begin-your-open-voice-os-journey-with-the-ovos-installer/14900>

View File

@@ -0,0 +1,87 @@
# Snapcast with MPD
## Install
Snapcast server/client builds are available from the badaix/snapcast project (linked below); MPD is installed via `apt` in the Setup section.
## Setup
### MPD
```bash
sudo apt install --no-install-recommends mpd
```
/etc/mpd.conf
```conf
music_directory "/var/lib/mpd/music"
playlist_directory "/var/lib/mpd/playlists"
db_file "/var/lib/mpd/tag_cache"
log_file "/var/log/mpd/mpd.log"
pid_file "/run/mpd/pid"
state_file "/var/lib/mpd/state"
sticker_file "/var/lib/mpd/sticker.sql"
user "mpd"
bind_to_address "0.0.0.0"
input {
plugin "curl"
}
decoder {
plugin "hybrid_dsd"
enabled "no"
}
audio_output {
type "fifo"
name "Snapcast"
path "/tmp/mpdfifo"
format "48000:16:2"
mixer_type "software"
}
filesystem_charset "UTF-8"
```
### Snapserver
<https://github.com/badaix/snapcast>
/etc/snapserver.conf
```conf
[server]
[http]
enabled = true
bind_to_address = 0.0.0.0
port = 1780
doc_root = /usr/share/snapserver/snapweb
[tcp]
enabled = true
bind_to_address = 0.0.0.0
port = 1705
[stream]
stream = pipe:///tmp/mpdfifo?name=MPD
[logging]
```
### Snapclient
<https://github.com/badaix/snapcast>
/etc/default/snapclient
```bash
sudo -u snapclient /usr/bin/snapclient -l
sudo vim /etc/default/snapclient
```
```conf
SNAPCLIENT_OPTS="-s plughw:CARD=Speaker,DEV=0 -h 10.1.134.41"
```

View File

@@ -0,0 +1,122 @@
# Wyoming Satellite
## Install
<https://github.com/rhasspy/wyoming-satellite/blob/master/docs/tutorial_2mic.md>
```bash
sudo apt update && sudo apt upgrade -y
sudo apt install --no-install-recommends \
git \
python3-venv \
vim \
tmux
sudo reboot
git clone https://github.com/rhasspy/wyoming-satellite.git
cd wyoming-satellite/
python3 -m venv .venv
.venv/bin/pip3 install --upgrade pip
.venv/bin/pip3 install --upgrade wheel setuptools
.venv/bin/pip3 install \
-f 'https://synesthesiam.github.io/prebuilt-apps/' \
-r requirements.txt \
-r requirements_audio_enhancement.txt \
-r requirements_vad.txt
# Find microphone
arecord -L | grep plughw -A 2
# Create a test recording
arecord -D plughw:CARD=JV801,DEV=0 -r 16000 -c 1 -f S16_LE -t wav -d 5 test.wav
# Find speaker
aplay -L | grep plughw -A 2
# Play test recording
aplay -D plughw:CARD=JV801,DEV=0 test.wav
# Bedroom
script/run \
--debug \
--name 'Bedroom Satellite' \
--uri 'tcp://0.0.0.0:10700' \
--mic-command 'arecord -D plughw:CARD=Speaker,DEV=0 -r 16000 -c 1 -f S16_LE -t raw' \
--snd-command 'aplay -D plughw:CARD=Speaker,DEV=0 -r 22050 -c 1 -f S16_LE -t raw'
# Living Room
script/run \
--debug \
--name 'Living Room Satellite' \
--uri 'tcp://0.0.0.0:10700' \
--mic-command 'arecord -D plughw:CARD=Speaker,DEV=0 -r 16000 -c 1 -f S16_LE -t raw' \
--snd-command 'aplay -D plughw:CARD=Speaker,DEV=0 -r 22050 -c 1 -f S16_LE -t raw'
```
## Systemd
### Create and Edit
```bash
sudo systemctl edit --force --full wyoming-satellite.service
sudo systemctl enable --now wyoming-satellite.service
sudo journalctl -u wyoming-satellite.service -f
```
### Bedroom
```conf
[Unit]
Description=Wyoming Satellite
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
ExecStart=/home/ducoterra/wyoming-satellite/script/run \
--name 'Bedroom Satellite' \
--uri 'tcp://0.0.0.0:10700' \
--mic-command 'arecord -D plughw:CARD=Speaker,DEV=0 -r 16000 -c 1 -f S16_LE -t raw' \
--snd-command 'aplay -D plughw:CARD=Speaker,DEV=0 -r 22050 -c 1 -f S16_LE -t raw'
WorkingDirectory=/home/ducoterra/wyoming-satellite
Restart=always
RestartSec=1
[Install]
WantedBy=default.target
```
### Living Room
```conf
[Unit]
Description=Wyoming Satellite
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
ExecStart=/home/ducoterra/wyoming-satellite/script/run \
--name 'Living Room Satellite' \
--uri 'tcp://0.0.0.0:10700' \
--mic-command 'arecord -D plughw:CARD=Speaker,DEV=0 -r 16000 -c 1 -f S16_LE -t raw' \
--snd-command 'aplay -D plughw:CARD=Speaker,DEV=0 -r 22050 -c 1 -f S16_LE -t raw'
WorkingDirectory=/home/ducoterra/wyoming-satellite
Restart=always
RestartSec=1
[Install]
WantedBy=default.target
```
## Volume
Run `alsamixer`.
## Community Wake Words
<https://github.com/fwartner/home-assistant-wakewords-collection?tab=readme-ov-file>
<https://github.com/fwartner/ha-openwakeword-installer>