the great migration from truenas to fedora and all its collateral

All checks were successful
Reese's Arch Toolbox / build-and-push-arch-toolbox (push) Successful in 24m47s

podman/graduated/caddy/caddy.md (new file)
@@ -0,0 +1,63 @@

# Caddy Reverse Proxy

## Install Caddy

As root

```bash
mkdir /etc/caddy
vim /etc/caddy/Caddyfile
```

Caddy will automatically provision certificates if the server's DNS record points to the correct IP
and the server is reachable on the specified ports. All you need to do is put `https` in the Caddy config.

```conf
# Nextcloud
https://nextcloud.reeseapps.com:443 {
    reverse_proxy podman.reeselink.com:11000
}

https://nextcloud.reeseapps.com:8443 {
    reverse_proxy podman.reeselink.com:11001 {
        transport http {
            tls_insecure_skip_verify
        }
    }
}

# Gitea
https://gitea.reeseapps.com:443 {
    reverse_proxy podman.reeselink.com:3000
}
```

```bash
vim /etc/containers/systemd/caddy.container
```

```conf
[Unit]
Description=Caddy

[Container]
AddCapability=NET_ADMIN
ContainerName=caddy
Image=docker.io/caddy:2
Network=host
SecurityLabelDisable=true
Volume=/etc/caddy:/etc/caddy
Volume=caddy_data:/data
Volume=caddy_config:/config

[Service]
Restart=always

[Install]
WantedBy=default.target
```

```bash
systemctl daemon-reload
systemctl start caddy
```
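
Not part of the original notes, but a quick sanity check once the unit is running: watch the log for certificate issuance and confirm the proxy answers on the hostnames configured above.

```bash
# Follow Caddy's log; look for a successful certificate obtain for each site
podman logs -f caddy

# Confirm the proxy responds over HTTPS for the configured sites
curl -I https://nextcloud.reeseapps.com
curl -I https://gitea.reeseapps.com
```

If you later edit the Caddyfile, `systemctl restart caddy` is the simplest way to pick up the change.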

podman/graduated/gitea/compose.yaml (new file)
@@ -0,0 +1,45 @@

```yaml
version: "3"

networks:
  gitea:

services:
  gitea:
    image: docker.gitea.com/gitea:1.23.7
    container_name: gitea
    environment:
      - USER_UID=1000
      - USER_GID=1000
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=postgres:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
    restart: always
    networks:
      - gitea
    volumes:
      - /home/gitea/gitea:/data
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:22"
    depends_on:
      - postgres
    security_opt:
      - label=disable

  postgres:
    image: docker.io/library/postgres:15
    container_name: postgres
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    networks:
      - gitea
    volumes:
      - /home/gitea/postgres:/var/lib/postgresql/data
    security_opt:
      - label=disable
```

podman/graduated/gitea/gitea.md (new file)
@@ -0,0 +1,110 @@

# Gitea

## Gitea on Rootless Podman

### Create the gitea user

```bash
# As root: create the user
useradd gitea

# Generate /home/gitea/.ssh (and a keypair) as the gitea user
su - gitea
ssh-keygen
exit

# Reuse root's authorized_keys so you can SSH in as gitea
cp ~/.ssh/authorized_keys /home/gitea/.ssh/authorized_keys
chown gitea:gitea /home/gitea/.ssh/authorized_keys

# Keep user services running without an active login session
loginctl enable-linger $(id -u gitea)
```

SSH into the server as the gitea user

```bash
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
mkdir -p ~/.config/containers/systemd
mkdir data config postgres
```

### Convert Compose to Quadlet

```bash
# Run this in Homelab, not on the server.
mkdir quadlets

# Generate the systemd service
podman run \
    --security-opt label=disable \
    --rm \
    -v $(pwd):/compose \
    -v $(pwd)/quadlets:/quadlets \
    quay.io/k9withabone/podlet \
    -f /quadlets \
    -i \
    --overwrite \
    compose /compose/compose.yaml

# Copy the files to the server
scp -r quadlets/. gitea:~/.config/containers/systemd/
```

### Install Quadlets

The first user you register will be the admin

```bash
ssh gitea
systemctl --user daemon-reload
systemctl --user start gitea postgres
```
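
A small verification sketch (my addition, not from the original notes) before registering that first account:

```bash
# Both user units should be active
systemctl --user status gitea postgres

# Gitea should answer on the published port
curl -I http://localhost:3000
```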

## Gitea Runners

<https://docs.gitea.com/next/usage/actions/act-runner/#install-with-the-docker-image>

### Firewall Rules

Since our runner will be contacting our public IP, we need a firewall rule that allows traffic
from our DMZ network back to our DMZ network (hairpin NAT). Do this in UniFi or whatever
equivalent you have.
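
One hedged way to confirm the rule works: from the host that will run the runner, the public hostname should resolve and answer.

```bash
# Run this on the runner host; an HTTP response means hairpin NAT is working
curl -I https://gitea.reeseapps.com
```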

### Install

```bash
touch config.yaml

export GITEA_TOKEN=
docker run \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -e GITEA_INSTANCE_URL=https://gitea.reeseapps.com \
    -e GITEA_RUNNER_REGISTRATION_TOKEN=$GITEA_TOKEN \
    -e GITEA_RUNNER_NAME=gitea_runner \
    --restart always \
    --name gitea_runner \
    -d docker.io/gitea/act_runner:latest
```
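
As a follow-up check (my addition, not from the original notes), the runner container should stay up and its log should show a successful registration; it should then appear under Site Administration -> Actions -> Runners in the Gitea UI.

```bash
docker ps --filter name=gitea_runner
docker logs gitea_runner
```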

### Cache Cleanup

Each org or project with a package registry has its own cleanup rules. For example,
services -> settings -> Packages -> Add Cleanup Rule lets you create a cleanup
rule for packages stored under the "services" org. These cleanup rules should run automatically.

On the other hand, the Docker builder cache will balloon out of control over time. The Gitea
Docker runner is handled outside of Gitea's context, so you'll need to clean it up yourself.

```bash
# Check used system resources
docker system df
```

You should run something like this on a schedule:

```bash
# Prune the builder cache
docker builder prune -a
```

To run it every day at midnight: `crontab -e`

```bash
0 0 * * * yes | docker builder prune -a
```
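
If you'd rather not pipe `yes`, `docker builder prune` also accepts `--force`; an equivalent crontab entry would be:

```bash
0 0 * * * docker builder prune -af
```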

podman/graduated/gitea/quadlets/gitea.container (new file)
@@ -0,0 +1,19 @@

```conf
[Unit]
Requires=postgres.service

[Container]
ContainerName=gitea
Environment=USER_UID=1000 USER_GID=1000 GITEA__database__DB_TYPE=postgres GITEA__database__HOST=postgres:5432 GITEA__database__NAME=gitea GITEA__database__USER=gitea GITEA__database__PASSWD=gitea
Image=docker.gitea.com/gitea:1.23.7
Network=gitea.network
PublishPort=3000:3000
PublishPort=2222:22
SecurityLabelDisable=true
Volume=/home/gitea/gitea:/data
Volume=/etc/localtime:/etc/localtime:ro

[Service]
Restart=always

[Install]
WantedBy=default.target
```

podman/graduated/gitea/quadlets/gitea.network (new file)
@@ -0,0 +1,4 @@

```conf
[Network]

[Install]
WantedBy=default.target
```

podman/graduated/gitea/quadlets/postgres.container (new file)
@@ -0,0 +1,13 @@

```conf
[Container]
ContainerName=postgres
Environment=POSTGRES_USER=gitea POSTGRES_PASSWORD=gitea POSTGRES_DB=gitea
Image=docker.io/library/postgres:15
Network=gitea.network
SecurityLabelDisable=true
Volume=/home/gitea/postgres:/var/lib/postgresql/data

[Service]
Restart=always

[Install]
WantedBy=default.target
```

@@ -4,9 +4,10 @@
- [Install with Rootless Podman](#install-with-rootless-podman)
- [Create the nextcloud user](#create-the-nextcloud-user)
- [Install Podman](#install-podman)
- [Create the container autostart service](#create-the-container-autostart-service)
- [Install Nextcloud](#install-nextcloud)
- [Install Caddy](#install-caddy)
- [Firewall](#firewall)
- [Backups](#backups)
- [Maintenance Mode](#maintenace-mode)
- [Trusted Proxy](#trusted-proxy)

@@ -32,21 +33,84 @@ This has been tested working on Fedora 41 with selinux and firewalld enabled.
### Create the nextcloud user

```bash
useradd nextcloud
su - nextcloud
ssh-keygen
exit
cp ~/.ssh/authorized_keys /home/nextcloud/.ssh/authorized_keys
chown nextcloud:nextcloud /home/nextcloud/.ssh/authorized_keys
loginctl enable-linger $(id -u nextcloud)
```

### Install Podman

```bash
# As root user
dnf install podman

# Now SSH into the server as the nextcloud user
systemctl --user enable podman-restart
systemctl --user enable --now podman.socket
```

### Create the container autostart service

As the nextcloud user.

`systemctl --user edit podman-restart.service`

```conf
[Service]
ExecStart=
ExecStart=/usr/bin/podman $LOGGING start --all --filter restart-policy=always --filter restart-policy=unless-stopped
ExecStop=
ExecStop=/bin/sh -c '/usr/bin/podman $LOGGING stop $(/usr/bin/podman container ls --filter restart-policy=always --filter restart-policy=unless-stopped -q)'
```

```bash
systemctl --user daemon-reload
```

### Install Nextcloud

`mkdir -p ~/.config/containers/systemd`

`vim ~/.config/containers/systemd/nextcloud-aio-mastercontainer.container`

```conf
[Unit]
Description=Nextcloud AIO Master Container
Documentation=https://github.com/nextcloud/all-in-one/blob/main/docker-rootless.md
After=local-fs.target
Requires=podman.socket

[Container]
ContainerName=nextcloud-aio-mastercontainer
Image=docker.io/nextcloud/all-in-one:latest
PublishPort=0.0.0.0:11001:8080
Volume=nextcloud_aio_mastercontainer:/mnt/docker-aio-config
Volume=/run/user/1001/podman/podman.sock:/var/run/docker.sock:Z
Network=bridge
SecurityLabelDisable=true

Environment=APACHE_PORT=11000
Environment=APACHE_IP_BINDING=0.0.0.0
Environment=WATCHTOWER_DOCKER_SOCKET_PATH=/run/user/1001/podman/podman.sock
Environment=NEXTCLOUD_DATADIR="/home/nextcloud/nextcloud_data"
Environment=SKIP_DOMAIN_VALIDATION=true

[Service]
Restart=always

[Install]
WantedBy=multi-user.target default.target
```

```bash
systemctl --user daemon-reload
systemctl --user start nextcloud-aio-mastercontainer
```

### Install Caddy

As root

@@ -84,7 +148,7 @@ Description=Caddy
[Container]
AddCapability=NET_ADMIN
ContainerName=caddy
Image=docker.io/caddy:2
Network=host
SecurityLabelDisable=true
Volume=/etc/caddy:/etc/caddy

@@ -103,62 +167,10 @@ systemctl daemon-reload
systemctl start caddy
```

### Firewall

Allow traffic to 11000 from your reverse proxy
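
A minimal firewalld sketch of that rule, assuming the reverse proxy sits at 192.168.1.10 (substitute your Caddy host's address):

```bash
# Allow only the reverse proxy to reach the AIO Apache port
firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.1.10" port port="11000" protocol="tcp" accept'
firewall-cmd --reload
```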

## Backups

@@ -250,7 +262,6 @@ Sometimes this is caused by a broken app or twofactor. try:
./occ app:disable integration_openai
```

## Freezing after working for a bit

### Out of disk space

@@ -270,4 +281,4 @@ This can happen when the redis volume doesn't have the correct permissions
podman exec -it --user root nextcloud-aio-redis bash
ls -lah /data
chown redis:redis /data
```

podman/graduated/nginx/nginx.conf (new file)
@@ -0,0 +1,20 @@

```conf
user nginx;
worker_processes auto;

error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

stream {
    server {
        listen 2222;
        listen [::]:2222;
        proxy_pass podman.reeselink.com:2222;
    }
}
```

podman/graduated/nginx/nginx.md (new file)
@@ -0,0 +1,37 @@

# Nginx

## TCP Stream Proxy

```bash
# Get the initial configuration
podman run --rm --entrypoint=cat docker.io/nginx /etc/nginx/nginx.conf > nginx.conf
scp nginx.conf 3dserver:/etc/nginx/nginx.conf
```

```bash
vim /etc/containers/systemd/nginx.container
```

```conf
[Unit]
Description=Nginx

[Container]
AddCapability=NET_ADMIN
ContainerName=nginx
Image=docker.io/nginx
Network=host
SecurityLabelDisable=true
Volume=/etc/nginx:/etc/nginx

[Service]
Restart=always

[Install]
WantedBy=default.target
```

```bash
systemctl daemon-reload
systemctl start nginx
```
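
A hedged way to verify the stream proxy end to end, using the `3dserver` host the config was copied to above:

```bash
# The proxied port should be reachable
nc -vz 3dserver 2222

# And an actual Gitea SSH handshake should pass through (assumes Gitea's default `git` SSH user)
ssh -T -p 2222 git@3dserver
```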