nginx load balancing internal/external working

2024-05-29 01:07:21 -04:00
parent d732fffd67
commit 3cf600b048
28 changed files with 348 additions and 197 deletions


@@ -111,7 +111,9 @@ our own components.
"--disable" \ "--disable" \
"servicelb" \ "servicelb" \
"--cluster-dns" \ "--cluster-dns" \
"10.43.0.10" "10.43.0.10" \
"--tls-san" \
"kube.reeselink.com" \
3. Join each server node 3. Join each server node
@@ -129,7 +131,9 @@ our own components.
"--disable" \ "--disable" \
"servicelb" \ "servicelb" \
"--cluster-dns" \ "--cluster-dns" \
"10.43.0.10" "10.43.0.10" \
"--tls-san" \
"kube.reeselink.com" \
Now you can change the ownership of (and copy) the k3s.yaml file: Now you can change the ownership of (and copy) the k3s.yaml file:


@@ -64,11 +64,16 @@ which explains how to set up a basic fedora server hosting platform with certbot
We'll use our own coredns server so we can add custom hosts. This prevents the server from collapsing
if the internet drops out (something that apparently happens quite frequently)
+One key entry in the coredns config is `driveripper.reeselink.com` pointing to the internal
+IP `172.20.0.1`. This ensures democratic-csi can access the truenas server without internet
+or DNS.
```bash
helm repo add coredns https://coredns.github.io/helm
helm repo update
helm upgrade --install \
-  --namespace=kube-system \
+  --namespace=coredns \
+  --create-namespace \
  --values coredns-values.yaml \
  coredns \
  coredns/coredns


@@ -9,7 +9,7 @@ kubernetes:
  colors:
    hosts:
-      orange:
+      # orange:
      yellow:
  apt:


@@ -128,6 +128,8 @@ servers:
        parameters: /etc/coredns/democratic-csi-server.reeselink.db democratic-csi-server.reeselink.com
      - name: file
        parameters: /etc/coredns/democratic-csi-client.reeselink.db democratic-csi-client.reeselink.com
+      - name: file
+        parameters: /etc/coredns/driveripper.reeselink.db driveripper.reeselink.com
      - name: forward
        parameters: . /etc/resolv.conf
      - name: cache
@@ -232,6 +234,13 @@ zoneFiles:
    democratic-csi-client.reeselink.com. IN NS b.iana-servers.net.
    democratic-csi-client.reeselink.com. IN NS a.iana-servers.net.
    democratic-csi-client.reeselink.com. IN A 127.0.0.1
+- filename: driveripper.reeselink.db
+  domain: driveripper.reeselink.com
+  contents: |
+    driveripper.reeselink.com. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600
+    driveripper.reeselink.com. IN NS b.iana-servers.net.
+    driveripper.reeselink.com. IN NS a.iana-servers.net.
+    driveripper.reeselink.com. IN A 172.20.0.1
# optional array of extra volumes to create
extraVolumes: []
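With the zone file in place, the custom record can be spot-checked from inside the cluster. A minimal sketch, assuming this chart is serving as the cluster DNS configured by the `--cluster-dns` flag above (the busybox image tag is arbitrary):

```bash
# Hedged check: driveripper.reeselink.com should resolve to 172.20.0.1 via
# cluster DNS, even with upstream DNS unreachable.
kubectl run dnscheck --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup driveripper.reeselink.com
```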


@@ -1,7 +0,0 @@
-# DNS Server
-## Install
-```bash
-ansible-playbook -i ansible/inventory.yaml dns/dns.yaml
-```


@@ -1,3 +0,0 @@
-server=10.1.0.1
-cache-size=1000
-address=/.reeseapps.com/10.1.203.197


@@ -1,25 +0,0 @@
-- name: Update dnsmasq server
-  hosts: dns
-  become: true
-  become_user: root
-  become_method: sudo
-  tasks:
-    - name: Ensure dnsmasq is installed
-      ansible.builtin.apt:
-        pkg:
-          - dnsmasq
-          - dnsutils
-    - name: Copy dns configurations
-      template:
-        src: "{{ item }}"
-        dest: /etc/dnsmasq.d/{{ item | basename }}
-        owner: root
-        group: root
-        mode: '0644'
-      with_fileglob:
-        - conf.d/*
-    - name: Reload dnsmasq service
-      ansible.builtin.systemd_service:
-        state: restarted
-        name: dnsmasq
-        enabled: true


@@ -9,3 +9,5 @@ controller:
    use-forwarded-headers: "true"
    compute-full-forwarded-for: "true"
    proxy-real-ip-cidr: "0.0.0.0/0"
+    use-proxy-protocol: "true"
+    log-format-upstream: '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Referer: $http_referer | $request | $time_local | $status |'
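`use-proxy-protocol` only works if every connection reaching the controller carries a PROXY protocol header, which the stream proxy's `proxy_protocol on;` (further down in this commit) provides. A quick way to confirm real client IPs survive the hop, assuming the chart's default deployment name and namespace:

```bash
# Hedged check: "Proxy Proto Addr" in the custom log format should show the
# original client address rather than the stream proxy's address.
kubectl -n ingress-nginx logs deploy/ingress-nginx-controller --tail=20
```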


@@ -14,3 +14,18 @@ ansible-playbook -i ansible/inventory.yaml nginx/nginx.yaml
We can detect whether traffic is originating internally or externally by checking if
it came in on port 443 or 444.
+External traffic always comes in through 444.
+## Certbot
+Use `certbot delete` to remove unused certs.
+## vars.yaml
+`allowed_ips` restricts access to the endpoint (deny all) and then allows only the list
+of IPs provided.
+## Logging
+You can tail all the nginx logs with `tail -f /var/log/nginx/*`
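Putting the port rule and the logging tip together, one rough way to verify the internal/external split from the proxy host (the domain is an example from vars.yaml):

```bash
# Hedged sketch: watch the stream log while exercising both entry ports.
# Requests to 443 should log as internal, requests to 444 as external.
tail -f /var/log/nginx/nginx_stream_access.log &
curl -sk -o /dev/null https://homeassistant.reeselink.com/      # internal path, 443
curl -sk -o /dev/null https://homeassistant.reeselink.com:444/  # external path, 444
kill $!
```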


@@ -16,7 +16,3 @@
      ansible.builtin.shell: /usr/bin/certbot certonly --dns-route53 -d '{{ item.1 }}' -n
      # Loops over every external.domains sub list
      loop: "{{ http | subelements('external.domains') }}"
-    - name: Start nginx service
-      ansible.builtin.systemd_service:
-        state: reloaded
-        name: nginx


@@ -4,25 +4,33 @@ map $http_upgrade $connection_upgrade {
}
server {
+    access_log /var/log/nginx/nginx_https_access.log basic;
+    error_log /var/log/nginx/nginx_https_error.log warn;
-    listen 127.0.0.1:443 ssl;
+    {%- for port in item.0.external.ports +%}
+    listen 127.0.0.1:{{ port }} ssl proxy_protocol;
+    {%- endfor +%}
-    listen 127.0.0.1:80;
-    if ($scheme = "http") {
-        return 301 https://$host:443$request_uri;
-    }
+    set_real_ip_from 127.0.0.1;
    server_name {{ item.1 }};
-    access_log /var/log/nginx/{{ item.1 }}-access.log compression;
+    http2 on;
    location / {
        proxy_pass {{ item.0.internal.protocol }}://{{ item.0.internal.ip }}:{{ item.0.internal.port }}$request_uri;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Scheme $scheme;
        proxy_set_header X-Forwarded-Proto $scheme;
-        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Accept-Encoding "";
        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $proxy_protocol_addr;
+        proxy_set_header X-Forwarded-For $proxy_protocol_addr;
        client_body_buffer_size 512k;
        proxy_read_timeout 86400s;
@@ -45,13 +53,11 @@ server {
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
    ssl_prefer_server_ciphers on;
-    # Optional settings:
    # OCSP stapling
-    # ssl_stapling on;
+    ssl_stapling on;
-    # ssl_stapling_verify on;
+    ssl_stapling_verify on;
-    # ssl_trusted_certificate /etc/letsencrypt/live/<your-nc-domain>/chain.pem;
+    ssl_trusted_certificate /etc/letsencrypt/live/{{ item.1 }}/fullchain.pem;
-    # replace with the IP address of your resolver
-    # resolver 127.0.0.1; # needed for oscp stapling: e.g. use 94.140.15.15 for adguard / 1.1.1.1 for cloudflared or 8.8.8.8 for google - you can use the same nameserver as listed in your /etc/resolv.conf file
+    resolver 127.0.0.1;
}
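Since stapling is now enabled rather than commented out, the handshake can be checked for a stapled response. A minimal probe (the domain is an example; any terminated vhost works):

```bash
# Hedged check: with ssl_stapling on, the status request extension should
# return an OCSP response instead of "no response sent".
openssl s_client -connect homeassistant.reeselink.com:443 -status </dev/null 2>/dev/null |
  grep -A 2 'OCSP response'
```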


@@ -1,3 +1,27 @@
+{%- set unique_ports = [] %}
+{%- for port in default_ports %}
+{{- unique_ports.append(port) }}
+{%- endfor %}
+# For each domain we want to terminate, forward to internal http server
+{%- set http_domains = [] %}
+{%- for item in (http | subelements('external.domains')) %}
+{#- Collect unique domains #}
+{%- if item.1 not in http_domains %}
+{{- http_domains.append(item.1) }}
+{%- endif %}
+{#- Collect unique ports #}
+{%- for port in item.0.external.ports %}
+{%- if port not in unique_ports %}
+{{- unique_ports.append(port) }}
+{%- endif %}
+{%- endfor %}
+{%- endfor %}
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
worker_processes 8;
@@ -5,55 +29,98 @@ worker_processes 8;
events {}
stream {
-    log_format basic '$remote_addr $domain [$time_local] '
-                     '$protocol $status $bytes_sent $bytes_received '
-                     '$session_time';
+    log_format basic '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward IP: $forward_ip:$upstream_port | Upstream Addr: $upstream_addr | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';
-    include /etc/nginx/stream.d/*.conf;
    # Map all SSL parsed server names to hosts
-    map $ssl_preread_server_name $domain {
+    map $ssl_preread_server_name $forward_ip {
-        "" 127.0.0.1:443;
+        # Empty ssl preread gets forwarded to internal
+        "" 127.0.0.1;
-        # For each domain we need to stream to a remote server, forward to internal ip
-        {% for item in (stream | subelements('external.domains')) %}
-        {{ item.1 }} {{ item.0.internal.ip }}:{{ item.0.internal.port }};
+        {% for item in http_domains %}
+        {{ item }} 127.0.0.1;
        {% endfor %}
-        # For each domain we want to terminate, forward to internal http server
-        {% for item in (http | subelements('external.domains')) %}
-        {{ item.1 }} 127.0.0.1:443;
-        {% endfor %}
-        default {{ nginx.defaults.domain }}:443;
+        default {{ nginx.defaults.ip }};
    }
+    # Since external traffic will be coming in on port 444, and we need to get some of that traffic
+    # to kubernetes ingress-nginx on port 443, we need to detect if the destination IP is kubernetes.
+    # If it is, forward that traffic to port 443. Otherwise, preserve the original port the traffic
+    # came in on.
+    map $forward_ip $upstream_port {
+        {{ nginx.defaults.ip }} 443;
+        default $server_port;
+    }
-    # Forward 443 traffic
    server {
-        access_log /var/log/nginx/stream-access-443.log basic;
+        access_log /var/log/nginx/nginx_stream_access.log basic;
+        error_log /var/log/nginx/nginx_stream_error.log warn;
-        listen {{ ansible_default_ipv4.address }}:443;
-        resolver 1.1.1.1;
-        proxy_pass $domain;
+        proxy_protocol on;
+        {% for port in unique_ports %}
+        listen {{ ansible_default_ipv4.address }}:{{ port }};
+        {% endfor %}
+        proxy_pass $forward_ip:$upstream_port;
        ssl_preread on;
        proxy_socket_keepalive on;
    }
+    include /etc/nginx/stream.d/*.conf;
}
http {
-    log_format compression '$remote_addr - $remote_user [$time_local] '
-                           '"$request" $status $bytes_sent '
-                           '"$http_referer" "$http_user_agent" "$gzip_ratio"';
+    log_format basic '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Forward IP: $forward_ip | Referer: $http_referer | $request | $time_local | $status |';
+    map $host $forward_ip {
+        "" "";
+        {% for item in http_domains %}
+        {{ item }} "";
+        {% endfor %}
+        default {{ nginx.defaults.ip }};
+    }
+    # Internal requests come through 80
    server {
-        access_log /var/log/nginx/http-access.log compression;
+        access_log /var/log/nginx/nginx_http_access.log basic;
+        error_log /var/log/nginx/nginx_http_error.log warn;
-        listen 80 default_server;
+        listen 127.0.0.1:80 default_server proxy_protocol;
+        location / {
+            # If we have a forward IP, forward the traffic
+            if ($forward_ip) {
+                proxy_pass $forward_ip:80;
+            }
+            # Else redirect if the scheme is http
            if ($scheme = "http") {
                return 301 https://$host:443$request_uri;
            }
        }
+    }
+    # External requests come through 81
+    server {
+        access_log /var/log/nginx/nginx_http_access.log basic;
+        error_log /var/log/nginx/nginx_http_error.log warn;
+        listen 127.0.0.1:81 default_server proxy_protocol;
+        location / {
+            # If we have a forward IP, forward the traffic
+            if ($forward_ip) {
+                proxy_pass $forward_ip:81;
+            }
+            # Else redirect if the scheme is http
+            if ($scheme = "http") {
+                return 301 https://$host:443$request_uri;
+            }
+        }
+    }
    include /etc/nginx/http.d/*.conf;
}
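To make the two maps concrete: an SNI that is not in `http_domains` falls through to the kubernetes default, and the 444-to-443 remap applies only there. A rough trace under that assumption (`containers.reeseapps.com` standing in for a kubernetes-hosted name):

```bash
# Hedged trace for an external hit on 444 with an unlisted SNI:
#   $ssl_preread_server_name -> $forward_ip = 10.1.2.101 (nginx.defaults.ip)
#   $forward_ip              -> $upstream_port = 443 (ingress-nginx port)
# so the stream proxies to 10.1.2.101:443 with a PROXY protocol header.
curl -skI https://containers.reeseapps.com:444/
```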


@@ -58,13 +58,15 @@
- name: Template all http configurations
  template:
    src: https.conf
-    dest: /etc/nginx/http.d/{{ item.1 }}.conf
+    dest: /etc/nginx/http.d/{{ item.1 }}.{{ item.0.internal.port }}.conf
    owner: root
    group: root
    mode: '0644'
  # item.0 == full dictionary
  # item.1 == external domain
  loop: "{{ http | subelements('external.domains') }}"
+- name: Test nginx configuration
+  ansible.builtin.shell: /usr/sbin/nginx -t
- name: Reload nginx service
  ansible.builtin.systemd_service:
    state: restarted


@@ -1,5 +1,7 @@
server {
-    access_log /var/log/nginx/gitea-ssh.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
    listen {{ ansible_default_ipv4.address }}:2222;
-    proxy_pass lb.reeselink.com:2222;
+    proxy_pass 10.1.2.100:2222;
}


@@ -1,5 +1,7 @@
server {
-    access_log /var/log/nginx/iperf.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
    listen {{ ansible_default_ipv4.address }}:5201;
    listen {{ ansible_default_ipv4.address }}:5201 udp;
    proxy_pass 127.0.0.1:5201;

nginx/stream.d/kube.conf (new file)

@@ -0,0 +1,13 @@
+upstream kube_backend {
+    server 10.1.2.13:6443 max_fails=2 fail_timeout=30s;
+    server 10.1.2.14:6443 max_fails=2 fail_timeout=30s;
+    server 10.1.2.15:6443 max_fails=2 fail_timeout=30s;
+}
+server {
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
+    listen {{ ansible_default_ipv4.address }}:6443;
+    proxy_pass kube_backend;
+}
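Combined with the `--tls-san kube.reeselink.com` flag added to the k3s servers above, this upstream gives the API one load-balanced name. A minimal smoke test, assuming `kube.reeselink.com` resolves to the proxy host:

```bash
# Hedged check: /version is typically served to anonymous clients, and the
# certificate should now carry the kube.reeselink.com SAN.
curl -k https://kube.reeselink.com:6443/version
openssl s_client -connect kube.reeselink.com:6443 </dev/null 2>/dev/null |
  openssl x509 -noout -ext subjectAltName
```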


@@ -1,6 +1,8 @@
server {
-    access_log /var/log/nginx/unifi-external.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
    resolver 1.1.1.1;
-    listen {{ ansible_default_ipv4.address }}:8080;
+    listen {{ ansible_default_ipv4.address }}:8082;
    proxy_pass {{ unifi_external.domain }}:8080;
}


@@ -1,35 +1,54 @@
nginx:
  defaults:
-    domain: nginx.reeselink.com
+    ip: "10.1.2.101"
iperf:
-  domain: lb.reeselink.com
+  domain: 10.1.2.100
unifi_external:
  domain: unifi-server1.reeselink.com
internal_ip: 10.1.0.0/16
-cr10se:
-- external:
-    domains:
-    - cr10se.reeseseal.com
-    port: 443
-  internal:
-    ip: "10.3.165.70"
-    port: 80
-    protocol: http
+default_ports:
+- 80
+- 81
+- 443
+- 444
http:
- external:
    domains:
    - homeassistant.reeseapps.com
    - homeassistant.reeselink.com
-    port: 443
+    ports:
+    - 443
+    - 444
  internal:
    ip: "10.2.131.2"
    port: 8123
    protocol: https
+- external:
+    domains:
+    - driveripper.reeseapps.com
+    - driveripper.reeselink.com
+    ports:
+    - 443
+    - 444
+  internal:
+    ip: "10.1.2.10"
+    port: 8443
+    protocol: https
+- external:
+    domains:
+    - replicator.reeselink.com
+    ports:
+    - 443
+  internal:
+    ip: "10.2.224.77"
+    port: 80
+    protocol: http
- external:
    domains:
    - yellow.reeselink.com
-    port: 443
+    ports:
+    - 443
  internal:
    ip: "10.1.203.197"
    port: 9090
@@ -37,7 +56,8 @@ http:
- external:
    domains:
    - node1.reeselink.com
-    port: 443
+    ports:
+    - 443
  internal:
    ip: "10.1.2.13"
    port: 9090
@@ -45,7 +65,8 @@ http:
- external:
    domains:
    - node2.reeselink.com
-    port: 443
+    ports:
+    - 443
  internal:
    ip: "10.1.2.14"
    port: 9090
@@ -53,17 +74,57 @@ http:
- external:
    domains:
    - node3.reeselink.com
-    port: 443
+    ports:
+    - 443
  internal:
    ip: "10.1.2.15"
    port: 9090
    protocol: https
+# Printer
-stream:
- external:
    domains:
-    - containers.reeseapps.com
+    - cr10se.reeselink.com
-    port: 443
+    ports:
+    - 443
  internal:
-    ip: "10.1.2.13"
+    ip: "10.3.165.70"
-    port: 6443
+    port: 80
+    protocol: http
+# Websocket
+- external:
+    domains:
+    - cr10se.reeselink.com
+    ports:
+    - 9999
+  internal:
+    ip: "10.3.165.70"
+    port: 9999
+    protocol: http
+# Camera
+- external:
+    domains:
+    - cr10se.reeselink.com
+    ports:
+    - 8080
+  internal:
+    ip: "10.3.165.70"
+    port: 8080
+    protocol: http
+- external:
+    domains:
+    - pihole.reeselink.com
+    ports:
+    - 443
+  internal:
+    ip: 10.1.203.197
+    port: 8081
+    protocol: http
+- external:
+    domains:
+    - attmodem.reeselink.com
+    ports:
+    - 443
+  internal:
+    ip: 192.168.1.254
+    port: 80
+    protocol: http


@@ -5,11 +5,7 @@
- [Podman systemd files](#podman-systemd-files)
- [iperf3](#iperf3)
- [pihole](#pihole)
-- [Grafana](#grafana)
+- [Cloudflared](#cloudflared)
-- [Nginx](#nginx)
-  - [Nginx Build](#nginx-build)
-  - [Nginx Run](#nginx-run)
-  - [Quadlet Generation](#quadlet-generation)
- [Update yellow/orange](#update-yelloworange)
## Notes
@@ -53,10 +49,12 @@ podman run \
### pihole
+<https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts>
```bash
podman run \
-  -v ./compose:/compose \
+  -v ./podman/compose:/compose \
-  -v ./quadlets:/quadlets \
+  -v ./podman/quadlets:/quadlets \
  quay.io/k9withabone/podlet \
  -f /quadlets \
  -i \
@@ -66,56 +64,25 @@ podman run \
  compose /compose/pihole-compose.yaml
```
-### Grafana
+### Cloudflared
+Creates a DOH proxy for pihole. Just set the pihole upstream to `10.1.203.197#5053`.
```bash
podman run \
-  -v ./compose:/compose \
+  -v ./podman/compose:/compose \
-  -v ./quadlets:/quadlets \
+  -v ./podman/quadlets:/quadlets \
  quay.io/k9withabone/podlet \
  -f /quadlets \
  -i \
  --overwrite \
-  compose /compose/grafana-compose.yaml
+  --wants network-online.target \
+  --after network-online.target \
+  compose /compose/cloudflared-compose.yaml
```
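Once the container is running, the proxy can be tested directly before repointing pihole (dig comes from the dnsutils package already used elsewhere in this repo):

```bash
# Hedged check: cloudflared should answer plain DNS on 5053, resolving
# through its DoH upstreams.
dig @10.1.203.197 -p 5053 example.com +short
```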
-### Nginx
-nginx proxies all other services.
-#### Nginx Build
+## Update yellow/orange
```bash
-podman build -f nginx-stream/Containerfile -t docker.io/ducoterra/nginx-stream:latest
-podman build -f dns/Containerfile -t docker.io/ducoterra/nginx-stream-dns:latest
-podman push docker.io/ducoterra/nginx-stream:latest
-podman push docker.io/ducoterra/nginx-stream-dns:latest
-podman-compose -f compose/nginx-compose.yaml up -d
-```
-#### Nginx Run
-```bash
-podman-compose -f compose/nginx-compose.yaml up
-```
-#### Quadlet Generation
-```bash
-podman run \
-  -v ./compose:/compose \
-  -v ./quadlets:/quadlets \
-  quay.io/k9withabone/podlet \
-  -f /quadlets \
-  -i \
-  --overwrite \
-  compose /compose/nginx-stream-compose.yaml
-```
-#### Update yellow/orange
-```bash
-ansible-playbook -i ansible/inventory.yaml ansible/update-quadlets.yaml
+ansible-playbook -i ./ansible/inventory.yaml podman/update-quadlets.yaml
```


@@ -0,0 +1,12 @@
version: "3"
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
cloudflared:
container_name: cloudflared
image: docker.io/cloudflare/cloudflared:2024.5.0
command: proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
ports:
- "0.0.0.0:5053:5053/tcp"
- "0.0.0.0:5053:5053/udp"
restart: unless-stopped


@@ -4,7 +4,7 @@ version: "3"
services:
  pihole:
    container_name: pihole
-    image: pihole/pihole:latest
+    image: docker.io/pihole/pihole:2024.05.0
    ports:
      - "0.0.0.0:53:53/tcp"
      - "0.0.0.0:53:53/udp"


@@ -0,0 +1,15 @@
+[Unit]
+Wants=network-online.target
+[Container]
+ContainerName=cloudflared
+Exec=proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
+Image=docker.io/cloudflare/cloudflared:2024.5.0
+PublishPort=0.0.0.0:5053:5053/tcp
+PublishPort=0.0.0.0:5053:5053/udp
+[Service]
+Restart=always
+[Install]
+WantedBy=default.target
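A quadlet only becomes a unit after systemd re-reads it; a minimal sketch of bringing this one up by hand (the generated unit name follows the `.container` file name):

```bash
# Hedged: regenerate units from quadlets, then start the generated service.
systemctl daemon-reload
systemctl start cloudflared.service
systemctl status cloudflared.service --no-pager
```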


@@ -3,8 +3,8 @@ Wants=network-online.target
[Container]
ContainerName=pihole
-Environment=TZ=America/Chicago "WEBPASSWORD=SET A PASSWORD HERE"
+Environment=TZ=America/Chicago
-Image=pihole/pihole:latest
+Image=docker.io/pihole/pihole:2024.05.0
PublishPort=0.0.0.0:53:53/tcp
PublishPort=0.0.0.0:53:53/udp
PublishPort=0.0.0.0:8081:80/tcp


@@ -11,9 +11,15 @@
    owner: root
    group: root
    mode: '0644'
-  with_items:
+  loop:
-  - ../quadlets/iperf3.container
+  - ./quadlets/iperf3.container
-  - ../quadlets/pihole.container
+  - ./quadlets/pihole.container
+  - ./quadlets/cloudflared.container
- name: Daemon-reload to trigger re-read of quadlets
  ansible.builtin.systemd_service:
    daemon_reload: true
+- name: Restart all quadlet services
+  ansible.builtin.systemd_service:
+    state: restarted
+    name: "{{ item }}"
+  loop: ["pihole", "iperf3", "cloudflared"]


@@ -9,28 +9,28 @@
export SERVER=$1
export FQDN=$2
-export USER=$3
+export KUBE_USER=$3
-export CERT_DIR=$HOME/.kube/$FQDN/users/$USER
+export CERT_DIR=$HOME/.kube/$FQDN/users/$KUBE_USER
export CA_CERT_DIR=$HOME/.kube/$FQDN
-export SERVER_USER_DIR="~/.kube/users/$USER"
+export SERVER_USER_DIR="~/.kube/users/$KUBE_USER"
export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
-export SERVER_USER="$USER-$SERVER_NAME"
+export SERVER_USER="$KUBE_USER-$SERVER_NAME"
-export KUBECONFIG="$HOME/.kube/$USER-config"
+export KUBECONFIG="$HOME/.kube/$KUBE_USER-config"
-if [ -z $USER ]; then
+if [ -z $KUBE_USER ]; then
    echo "No arguments supplied! Format is ./upsert.sh <SERVER_FQDN> <USER>"
    exit 1
fi
if [ -z $SERVER ]; then
-    echo "No server supplied for user $USER"
+    echo "No server supplied for user $KUBE_USER"
    exit 1
fi
-if [ $USER = "admin" ]; then
+if [ $KUBE_USER = "admin" ]; then
    echo "Creating admin user for server $SERVER"
fi
@@ -43,24 +43,24 @@ if [ $? -ne 0 ]; then
fi
echo "Generating openssl cert"
-docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl genrsa -out /$USER/$USER.key 2048
+podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl genrsa -out /$KUBE_USER/$KUBE_USER.key 2048
-if [ $USER = "admin" ]; then
+if [ $KUBE_USER = "admin" ]; then
-    docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=system:masters"
+    podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=system:masters"
else
-    docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=user"
+    podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=user"
fi
# /CN=admin/O=manager
if [ $? -ne 0 ]; then
-    echo "Couldn't create cert with Docker. Are you sure it's running?"
+    echo "Couldn't create cert with Podman. Are you sure it's running?"
    exit 1
fi
echo "Creating namespace dir on server"
ssh $SERVER "mkdir -p $SERVER_USER_DIR"
echo "Copying client csr to server cert dir"
-scp $CERT_DIR/$USER.csr $SERVER:$SERVER_USER_DIR/$USER.csr
+scp $CERT_DIR/$KUBE_USER.csr $SERVER:$SERVER_USER_DIR/$KUBE_USER.csr
if [ $? -ne 0 ]; then
    echo "Failed to copy client csr to server cert dir"
@@ -87,39 +87,39 @@ exit 1
fi
echo "Signing cert with pod $CERT_POD"
-ssh $SERVER "kubectl -n kube-system cp $SERVER_USER_DIR/$USER.csr $CERT_POD:/certs/$USER.csr"
+ssh $SERVER "kubectl -n kube-system cp $SERVER_USER_DIR/$KUBE_USER.csr $CERT_POD:/certs/$KUBE_USER.csr"
-ssh $SERVER "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$USER.crt -days 5000"
+ssh $SERVER "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$KUBE_USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$KUBE_USER.crt -days 5000"
-ssh $SERVER "kubectl -n kube-system cp $CERT_POD:/certs/$USER.crt ~/.kube/users/$USER/$USER.crt"
+ssh $SERVER "kubectl -n kube-system cp $CERT_POD:/certs/$KUBE_USER.crt ~/.kube/users/$KUBE_USER/$KUBE_USER.crt"
echo "retrieving signed cert"
-scp $SERVER:$SERVER_USER_DIR/$USER.crt $CERT_DIR/$USER.crt
+scp $SERVER:$SERVER_USER_DIR/$KUBE_USER.crt $CERT_DIR/$KUBE_USER.crt
echo "retrieving server ca"
wget --no-check-certificate https://$FQDN:6443/cacerts -O $CA_CERT_DIR/server-ca.pem
-echo "creating $FQDN-$USER context"
+echo "creating $FQDN-$KUBE_USER context"
-kubectl config set-context $FQDN-$USER
+kubectl config set-context $FQDN-$KUBE_USER
-echo "setting $FQDN-$USER as current context"
+echo "setting $FQDN-$KUBE_USER as current context"
-kubectl config set current-context $FQDN-$USER
+kubectl config set current-context $FQDN-$KUBE_USER
-echo "adding server to config with new context $FQDN-$USER"
+echo "adding server to config with new context $FQDN-$KUBE_USER"
kubectl config set-cluster $FQDN --server=https://$FQDN:6443 --certificate-authority=$CA_CERT_DIR/server-ca.pem
kubectl config set contexts.$(kubectl config current-context).cluster $FQDN
echo "adding user to config file"
-kubectl config set-credentials $SERVER_USER --client-certificate=$CERT_DIR/$USER.crt --client-key=$CERT_DIR/$USER.key
+kubectl config set-credentials $SERVER_USER --client-certificate=$CERT_DIR/$KUBE_USER.crt --client-key=$CERT_DIR/$KUBE_USER.key
echo "setting user context"
kubectl config set contexts.$(kubectl config current-context).user $SERVER_USER
-if [ $USER = "admin" ]; then
+if [ $KUBE_USER = "admin" ]; then
    echo "Admin user created, skipping namespace"
    echo "export KUBECONFIG=$KUBECONFIG"
    exit 0
fi
echo "Templating namespace with helm and copying to server"
-helm template $USER --set user=$USER ./helm/namespace | ssh $SERVER "cat - > $SERVER_USER_DIR/namespace.yaml"
+helm template $KUBE_USER --set user=$KUBE_USER ./helm/namespace | ssh $SERVER "cat - > $SERVER_USER_DIR/namespace.yaml"
if [ $? -ne 0 ]; then
    echo "Failed to template namespace. Is helm installed?"
@@ -130,7 +130,7 @@ echo "Creating namespace from template"
ssh $SERVER "kubectl apply -f $SERVER_USER_DIR/namespace.yaml"
echo "Setting namespace context"
-kubectl config set contexts.$(kubectl config current-context).namespace $USER
+kubectl config set contexts.$(kubectl config current-context).namespace $KUBE_USER
if [ $? -ne 0 ]; then
    echo "Failed to create namespace"