nginx load balancing internal/external working
@@ -111,7 +111,9 @@ our own components.
 "--disable" \
 "servicelb" \
 "--cluster-dns" \
-"10.43.0.10"
+"10.43.0.10" \
+"--tls-san" \
+"kube.reeselink.com" \
 
 3. Join each server node
 
@@ -129,7 +131,9 @@ our own components.
 "--disable" \
 "servicelb" \
 "--cluster-dns" \
-"10.43.0.10"
+"10.43.0.10" \
+"--tls-san" \
+"kube.reeselink.com" \
 
 Now you can change the ownership of (and copy) the k3s.yaml file:
 
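For reference, a minimal sketch of that step (assuming the default k3s paths and the `--tls-san` name set above):

```bash
# Hypothetical example: copy the admin kubeconfig out of /etc/rancher/k3s
# (the k3s default) and point it at the TLS SAN instead of 127.0.0.1.
sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
sudo chown "$(id -u):$(id -g)" ~/.kube/config
sed -i 's/127.0.0.1/kube.reeselink.com/' ~/.kube/config
```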
@@ -64,11 +64,16 @@ which explains how to set up a basic fedora server hosting platform with certbot
 We'll use our own coredns server so we can add custom hosts. This prevents the server from collapsing
 if the internet drops out (something that apparently happens quite frequently).
 
+One key entry in the coredns config is `driveripper.reeselink.com` pointing to the internal
+IP `172.20.0.1`. This ensures democratic-csi can access the truenas server without internet
+or DNS.
+
 ```bash
 helm repo add coredns https://coredns.github.io/helm
 helm repo update
 helm upgrade --install \
-  --namespace=kube-system \
+  --namespace=coredns \
+  --create-namespace \
   --values coredns-values.yaml \
   coredns \
   coredns/coredns
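Once the chart is up, a quick spot check of the custom zone (a hypothetical test; assumes the cluster DNS address 10.43.0.10 configured earlier):

```bash
# Resolve the custom host against the in-cluster coredns from a throwaway pod.
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup driveripper.reeselink.com 10.43.0.10
# Expected answer: 172.20.0.1
```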
@@ -9,7 +9,7 @@ kubernetes:
 
 colors:
   hosts:
-    orange:
+    # orange:
     yellow:
 
 apt:
@@ -128,6 +128,8 @@ servers:
         parameters: /etc/coredns/democratic-csi-server.reeselink.db democratic-csi-server.reeselink.com
       - name: file
         parameters: /etc/coredns/democratic-csi-client.reeselink.db democratic-csi-client.reeselink.com
+      - name: file
+        parameters: /etc/coredns/driveripper.reeselink.db driveripper.reeselink.com
       - name: forward
         parameters: . /etc/resolv.conf
       - name: cache
@@ -232,6 +234,13 @@ zoneFiles:
     democratic-csi-client.reeselink.com. IN NS b.iana-servers.net.
     democratic-csi-client.reeselink.com. IN NS a.iana-servers.net.
     democratic-csi-client.reeselink.com. IN A 127.0.0.1
+- filename: driveripper.reeselink.db
+  domain: driveripper.reeselink.com
+  contents: |
+    driveripper.reeselink.com. IN SOA sns.dns.icann.org. noc.dns.icann.org. 2015082541 7200 3600 1209600 3600
+    driveripper.reeselink.com. IN NS b.iana-servers.net.
+    driveripper.reeselink.com. IN NS a.iana-servers.net.
+    driveripper.reeselink.com. IN A 172.20.0.1
 
 # optional array of extra volumes to create
 extraVolumes: []
 
@@ -1,7 +0,0 @@
-# DNS Server
-
-## Install
-
-```bash
-ansible-playbook -i ansible/inventory.yaml dns/dns.yaml
-```
@@ -1,3 +0,0 @@
-server=10.1.0.1
-cache-size=1000
-address=/.reeseapps.com/10.1.203.197
dns/dns.yaml
@@ -1,25 +0,0 @@
-- name: Update dnsmasq server
-  hosts: dns
-  become: true
-  become_user: root
-  become_method: sudo
-  tasks:
-    - name: Ensure dnsmasq is installed
-      ansible.builtin.apt:
-        pkg:
-          - dnsmasq
-          - dnsutils
-    - name: Copy dns configurations
-      template:
-        src: "{{ item }}"
-        dest: /etc/dnsmasq.d/{{ item | basename }}
-        owner: root
-        group: root
-        mode: '0644'
-      with_fileglob:
-        - conf.d/*
-    - name: Reload dnsmasq service
-      ansible.builtin.systemd_service:
-        state: restarted
-        name: dnsmasq
-        enabled: true
@@ -9,3 +9,5 @@ controller:
   use-forwarded-headers: "true"
   compute-full-forwarded-for: "true"
   proxy-real-ip-cidr: "0.0.0.0/0"
+  use-proxy-protocol: "true"
+  log-format-upstream: '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Referer: $http_referer | $request | $time_local | $status |'
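With `use-proxy-protocol` on, real client addresses (rather than the load balancer's) should show up in the controller log. A hedged check (the label selector depends on how ingress-nginx was installed):

```bash
# Tail the ingress-nginx access log and look for real client IPs.
kubectl -n ingress-nginx logs -l app.kubernetes.io/name=ingress-nginx --tail=5
```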
@@ -14,3 +14,18 @@ ansible-playbook -i ansible/inventory.yaml nginx/nginx.yaml

We can detect whether traffic is originating internally or externally by checking if
it came in on port 443 or 444.

External traffic always comes in through 444.

## Certbot

Use `certbot delete` to remove unused certs.

## vars.yaml

`allowed_ips` restricts access to the endpoint (deny all) and then allows only the list
of IPs provided.

## Logging

You can tail all the nginx logs with `tail -f /var/log/nginx/*`.
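As a quick illustration of the 443/444 split (hypothetical host name; assumes a cert is already issued for it):

```bash
# Hit the internal (443) and external (444) listeners for the same vhost,
# then compare what the stream log recorded for each.
curl -sk -o /dev/null https://yellow.reeselink.com:443
curl -sk -o /dev/null https://yellow.reeselink.com:444
sudo tail -n 2 /var/log/nginx/nginx_stream_access.log
```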
@@ -16,7 +16,3 @@
       ansible.builtin.shell: /usr/bin/certbot certonly --dns-route53 -d '{{ item.1 }}' -n
       # Loops over every external.domains sub list
       loop: "{{ http | subelements('external.domains') }}"
-    - name: Start nginx service
-      ansible.builtin.systemd_service:
-        state: reloaded
-        name: nginx
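The playbook issues certs per domain with `--dns-route53`; for cleanup on the nginx host, a couple of hedged examples (the cert name is illustrative):

```bash
# List what certbot currently manages, then drop a cert nothing references.
sudo certbot certificates
sudo certbot delete --cert-name old.reeselink.com
```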
@@ -4,25 +4,33 @@ map $http_upgrade $connection_upgrade {
}

server {
    access_log /var/log/nginx/nginx_https_access.log basic;
    error_log /var/log/nginx/nginx_https_error.log warn;

    listen 127.0.0.1:443 ssl;
{%- for port in item.0.external.ports +%}
    listen 127.0.0.1:{{ port }} ssl proxy_protocol;
{%- endfor +%}
    listen 127.0.0.1:80;

    if ($scheme = "http") {
        return 301 https://$host:443$request_uri;
    }

    set_real_ip_from 127.0.0.1;

    server_name {{ item.1 }};

    access_log /var/log/nginx/{{ item.1 }}-access.log compression;

    http2 on;

    location / {
        proxy_pass {{ item.0.internal.protocol }}://{{ item.0.internal.ip }}:{{ item.0.internal.port }}$request_uri;

        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_set_header X-Forwarded-Scheme $scheme;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header Accept-Encoding "";

        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $proxy_protocol_addr;
        proxy_set_header X-Forwarded-For $proxy_protocol_addr;

        client_body_buffer_size 512k;
        proxy_read_timeout 86400s;
@@ -45,13 +53,11 @@ server {
    ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
    ssl_prefer_server_ciphers on;

    # Optional settings:

    # OCSP stapling
    # ssl_stapling on;
    # ssl_stapling_verify on;
    # ssl_trusted_certificate /etc/letsencrypt/live/<your-nc-domain>/chain.pem;
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/letsencrypt/live/{{ item.1 }}/fullchain.pem;

    # replace with the IP address of your resolver
    # resolver 127.0.0.1; # needed for OCSP stapling: e.g. use 94.140.15.15 for adguard / 1.1.1.1 for cloudflared or 8.8.8.8 for google - you can use the same nameserver as listed in your /etc/resolv.conf file
    resolver 127.0.0.1;
}
nginx/nginx.conf
@@ -1,3 +1,27 @@
+{%- set unique_ports = [] %}
+
+{%- for port in default_ports %}
+{{- unique_ports.append(port) }}
+{%- endfor %}
+
+# For each domain we want to terminate, forward to internal http server
+{%- set http_domains = [] %}
+{%- for item in (http | subelements('external.domains')) %}
+
+{#- Collect unique domains #}
+{%- if item.1 not in http_domains %}
+{{- http_domains.append(item.1) }}
+{%- endif %}
+
+{#- Collect unique ports #}
+{%- for port in item.0.external.ports %}
+{%- if port not in unique_ports %}
+{{- unique_ports.append(port) }}
+{%- endif %}
+{%- endfor %}
+
+{%- endfor %}
+
 load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
 
 worker_processes 8;
@@ -5,55 +29,98 @@ worker_processes 8;
events {}

stream {
    log_format basic '$remote_addr $domain [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time';

    include /etc/nginx/stream.d/*.conf;
    log_format basic '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward IP: $forward_ip:$upstream_port | Upstream Addr: $upstream_addr | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';

    # Map all SSL parsed server names to hosts
    map $ssl_preread_server_name $domain {
    map $ssl_preread_server_name $forward_ip {

        "" 127.0.0.1:443;
        # Empty ssl preread gets forwarded to internal
        "" 127.0.0.1;

        # For each domain we need to stream to a remote server, forward to internal ip
{% for item in (stream | subelements('external.domains')) %}
        {{ item.1 }} {{ item.0.internal.ip }}:{{ item.0.internal.port }};
{% endfor %}
{% for item in http_domains %}
        {{ item }} 127.0.0.1;
{% endfor %}

        # For each domain we want to terminate, forward to internal http server
{% for item in (http | subelements('external.domains')) %}
        {{ item.1 }} 127.0.0.1:443;
{% endfor %}

        default {{ nginx.defaults.domain }}:443;
        default {{ nginx.defaults.ip }};
    }

    # Since external traffic will be coming in on port 444, and we need to get some of that traffic
    # to kubernetes ingress-nginx on port 443, we need to detect if the destination IP is kubernetes.
    # If it is, forward that traffic to port 443. Otherwise, preserve the original port the traffic
    # came in on.
    map $forward_ip $upstream_port {
        {{ nginx.defaults.ip }} 443;
        default $server_port;
    }

    # Forward 443 traffic
    server {
        access_log /var/log/nginx/stream-access-443.log basic;
        listen {{ ansible_default_ipv4.address }}:443;
        resolver 1.1.1.1;
        proxy_pass $domain;
        access_log /var/log/nginx/nginx_stream_access.log basic;
        error_log /var/log/nginx/nginx_stream_error.log warn;

        proxy_protocol on;

{% for port in unique_ports %}
        listen {{ ansible_default_ipv4.address }}:{{ port }};
{% endfor %}

        proxy_pass $forward_ip:$upstream_port;
        ssl_preread on;
        proxy_socket_keepalive on;
    }

    include /etc/nginx/stream.d/*.conf;
}

http {
    log_format compression '$remote_addr - $remote_user [$time_local] '
                           '"$request" $status $bytes_sent '
                           '"$http_referer" "$http_user_agent" "$gzip_ratio"';
    log_format basic '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Forward IP: $forward_ip | Referer: $http_referer | $request | $time_local | $status |';

    map $host $forward_ip {
        "" "";

{% for item in http_domains %}
        {{ item }} "";
{% endfor %}

        default {{ nginx.defaults.ip }};
    }

    # Internal requests come through 80
    server {
        access_log /var/log/nginx/http-access.log compression;
        access_log /var/log/nginx/nginx_http_access.log basic;
        error_log /var/log/nginx/nginx_http_error.log warn;

        listen 80 default_server;
        listen 127.0.0.1:80 default_server proxy_protocol;

        location / {
            # If we have a forward IP, forward the traffic
            if ($forward_ip) {
                proxy_pass $forward_ip:80;
            }
            # Else redirect if the scheme is http
            if ($scheme = "http") {
                return 301 https://$host:443$request_uri;
            }
        }
    }

    # External requests come through 81
    server {
        access_log /var/log/nginx/nginx_http_access.log basic;
        error_log /var/log/nginx/nginx_http_error.log warn;

        listen 127.0.0.1:81 default_server proxy_protocol;

        location / {
            # If we have a forward IP, forward the traffic
            if ($forward_ip) {
                proxy_pass $forward_ip:81;
            }
            # Else redirect if the scheme is http
            if ($scheme = "http") {
                return 301 https://$host:443$request_uri;
            }
        }
    }

    include /etc/nginx/http.d/*.conf;
}
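One hedged way to watch the preread map make a routing decision (host names illustrative; assumes the external listener is reachable):

```bash
# Send a known SNI to the external listener and check which certificate
# the routed backend presents.
openssl s_client -connect lb.reeselink.com:444 -servername yellow.reeselink.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject
```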
@@ -58,13 +58,15 @@
     - name: Template all http configurations
       template:
         src: https.conf
-        dest: /etc/nginx/http.d/{{ item.1 }}.conf
+        dest: /etc/nginx/http.d/{{ item.1 }}.{{ item.0.internal.port }}.conf
         owner: root
         group: root
         mode: '0644'
       # item.0 == full dictionary
       # item.1 == external domain
       loop: "{{ http | subelements('external.domains') }}"
+    - name: Test nginx configuration
+      ansible.builtin.shell: /usr/sbin/nginx -t
     - name: Reload nginx service
       ansible.builtin.systemd_service:
         state: restarted
@@ -1,5 +1,7 @@
 server {
-    access_log /var/log/nginx/gitea-ssh.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
+
     listen {{ ansible_default_ipv4.address }}:2222;
-    proxy_pass lb.reeselink.com:2222;
+    proxy_pass 10.1.2.100:2222;
 }
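A minimal reachability check for the forwarded ssh port (assumes `nc` is installed on the client):

```bash
# The stream proxy should accept on 2222 and relay the TCP session to 10.1.2.100.
nc -vz lb.reeselink.com 2222
```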
@@ -1,5 +1,7 @@
 server {
-    access_log /var/log/nginx/iperf.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
+
     listen {{ ansible_default_ipv4.address }}:5201;
     listen {{ ansible_default_ipv4.address }}:5201 udp;
     proxy_pass 127.0.0.1:5201;
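And a hedged throughput test through the proxied listener:

```bash
# iperf3 client against the proxy; the server sits behind 127.0.0.1:5201.
iperf3 -c lb.reeselink.com -p 5201 -t 5
```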
nginx/stream.d/kube.conf (new file)
@@ -0,0 +1,13 @@
upstream kube_backend {
    server 10.1.2.13:6443 max_fails=2 fail_timeout=30s;
    server 10.1.2.14:6443 max_fails=2 fail_timeout=30s;
    server 10.1.2.15:6443 max_fails=2 fail_timeout=30s;
}

server {
    access_log /var/log/nginx/nginx_stream_access.log basic;
    error_log /var/log/nginx/nginx_stream_error.log warn;

    listen {{ ansible_default_ipv4.address }}:6443;
    proxy_pass kube_backend;
}
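This upstream is the load-balancing piece from the commit title: the three k3s servers share one API endpoint. A hedged smoke test (assumes anonymous access to `/version`, the k3s default, and the kubeconfig created earlier):

```bash
# The API should answer through the balanced listener...
curl -ks https://kube.reeselink.com:6443/version
# ...and kubectl should keep working if one backend is stopped.
kubectl --server=https://kube.reeselink.com:6443 get nodes
```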
@@ -1,6 +1,8 @@
 server {
-    access_log /var/log/nginx/unifi-external.log basic;
+    access_log /var/log/nginx/nginx_stream_access.log basic;
+    error_log /var/log/nginx/nginx_stream_error.log warn;
+
     resolver 1.1.1.1;
     listen {{ ansible_default_ipv4.address }}:8080;
     listen {{ ansible_default_ipv4.address }}:8082;
     proxy_pass {{ unifi_external.domain }}:8080;
 }
nginx/vars.yaml
@@ -1,35 +1,54 @@
nginx:
  defaults:
    domain: nginx.reeselink.com
    ip: "10.1.2.101"
iperf:
  domain: lb.reeselink.com
  domain: 10.1.2.100
unifi_external:
  domain: unifi-server1.reeselink.com
internal_ip: 10.1.0.0/16
cr10se:
  - external:
      domains:
        - cr10se.reeseseal.com
      port: 443
    internal:
      ip: "10.3.165.70"
      port: 80
      protocol: http
default_ports:
  - 80
  - 81
  - 443
  - 444

http:
  - external:
      domains:
        - homeassistant.reeseapps.com
        - homeassistant.reeselink.com
      port: 443
      ports:
        - 443
        - 444
    internal:
      ip: "10.2.131.2"
      port: 8123
      protocol: https
  - external:
      domains:
        - driveripper.reeseapps.com
        - driveripper.reeselink.com
      ports:
        - 443
        - 444
    internal:
      ip: "10.1.2.10"
      port: 8443
      protocol: https
  - external:
      domains:
        - replicator.reeselink.com
      ports:
        - 443
    internal:
      ip: "10.2.224.77"
      port: 80
      protocol: http
  - external:
      domains:
        - yellow.reeselink.com
      port: 443
      ports:
        - 443
    internal:
      ip: "10.1.203.197"
      port: 9090
@@ -37,7 +56,8 @@ http:
  - external:
      domains:
        - node1.reeselink.com
-     port: 443
+     ports:
+       - 443
    internal:
      ip: "10.1.2.13"
      port: 9090
@@ -45,7 +65,8 @@ http:
  - external:
      domains:
        - node2.reeselink.com
-     port: 443
+     ports:
+       - 443
    internal:
      ip: "10.1.2.14"
      port: 9090
@@ -53,17 +74,57 @@ http:
  - external:
      domains:
        - node3.reeselink.com
      port: 443
      ports:
        - 443
    internal:
      ip: "10.1.2.15"
      port: 9090
      protocol: https

stream:
  # Printer
  - external:
      domains:
        - containers.reeseapps.com
      port: 443
        - cr10se.reeselink.com
      ports:
        - 443
    internal:
      ip: "10.1.2.13"
      port: 6443
      ip: "10.3.165.70"
      port: 80
      protocol: http
  # Websocket
  - external:
      domains:
        - cr10se.reeselink.com
      ports:
        - 9999
    internal:
      ip: "10.3.165.70"
      port: 9999
      protocol: http
  # Camera
  - external:
      domains:
        - cr10se.reeselink.com
      ports:
        - 8080
    internal:
      ip: "10.3.165.70"
      port: 8080
      protocol: http
  - external:
      domains:
        - pihole.reeselink.com
      ports:
        - 443
    internal:
      ip: 10.1.203.197
      port: 8081
      protocol: http
  - external:
      domains:
        - attmodem.reeselink.com
      ports:
        - 443
    internal:
      ip: 192.168.1.254
      port: 80
      protocol: http
@@ -5,11 +5,7 @@
 - [Podman systemd files](#podman-systemd-files)
 - [iperf3](#iperf3)
 - [pihole](#pihole)
-- [Grafana](#grafana)
-- [Nginx](#nginx)
-- [Nginx Build](#nginx-build)
-- [Nginx Run](#nginx-run)
-- [Quadlet Generation](#quadlet-generation)
+- [Cloudflared](#cloudflared)
 - [Update yellow/orange](#update-yelloworange)
 
 ## Notes
@@ -53,10 +49,12 @@ podman run \
 
 ### pihole
 
+<https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts>
+
 ```bash
 podman run \
-  -v ./compose:/compose \
-  -v ./quadlets:/quadlets \
+  -v ./podman/compose:/compose \
+  -v ./podman/quadlets:/quadlets \
   quay.io/k9withabone/podlet \
   -f /quadlets \
   -i \
@@ -66,56 +64,25 @@ podman run \
 compose /compose/pihole-compose.yaml
 ```
 
-### Grafana
+### Cloudflared
+
+Creates a DOH proxy for pihole. Just set the pihole upstream to `10.1.203.197#5053`.
 
 ```bash
 podman run \
-  -v ./compose:/compose \
-  -v ./quadlets:/quadlets \
+  -v ./podman/compose:/compose \
+  -v ./podman/quadlets:/quadlets \
   quay.io/k9withabone/podlet \
   -f /quadlets \
   -i \
   --overwrite \
-  compose /compose/grafana-compose.yaml
+  --wants network-online.target \
+  --after network-online.target \
+  compose /compose/cloudflared-compose.yaml
 ```
 
-### Nginx
-
-nginx proxies all other services.
-
-#### Nginx Build
+## Update yellow/orange
 
 ```bash
-podman build -f nginx-stream/Containerfile -t docker.io/ducoterra/nginx-stream:latest
-podman build -f dns/Containerfile -t docker.io/ducoterra/nginx-stream-dns:latest
-
-podman push docker.io/ducoterra/nginx-stream:latest
-podman push docker.io/ducoterra/nginx-stream-dns:latest
-
-podman-compose -f compose/nginx-compose.yaml up -d
-```
-
-#### Nginx Run
-
-```bash
-podman-compose -f compose/nginx-compose.yaml up
-```
-
-#### Quadlet Generation
-
-```bash
-podman run \
-  -v ./compose:/compose \
-  -v ./quadlets:/quadlets \
-  quay.io/k9withabone/podlet \
-  -f /quadlets \
-  -i \
-  --overwrite \
-  compose /compose/nginx-stream-compose.yaml
-```
-
-#### Update yellow/orange
-
-```bash
-ansible-playbook -i ansible/inventory.yaml ansible/update-quadlets.yaml
+ansible-playbook -i ./ansible/inventory.yaml podman/update-quadlets.yaml
 ```
podman/compose/cloudflared-compose.yaml (new file)
@@ -0,0 +1,12 @@
version: "3"

# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
services:
  cloudflared:
    container_name: cloudflared
    image: docker.io/cloudflare/cloudflared:2024.5.0
    command: proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
    ports:
      - "0.0.0.0:5053:5053/tcp"
      - "0.0.0.0:5053:5053/udp"
    restart: unless-stopped
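To confirm the proxy answers (a hypothetical check; assumes it runs on the pihole host):

```bash
# Query the cloudflared DoH forwarder directly on 5053.
dig +short @10.1.203.197 -p 5053 example.com
```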
@@ -4,7 +4,7 @@ version: "3"
 services:
   pihole:
     container_name: pihole
-    image: pihole/pihole:latest
+    image: docker.io/pihole/pihole:2024.05.0
     ports:
       - "0.0.0.0:53:53/tcp"
       - "0.0.0.0:53:53/udp"
podman/quadlets/cloudflared.container (new file)
@@ -0,0 +1,15 @@
[Unit]
Wants=network-online.target

[Container]
ContainerName=cloudflared
Exec=proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
Image=docker.io/cloudflare/cloudflared:2024.5.0
PublishPort=0.0.0.0:5053:5053/tcp
PublishPort=0.0.0.0:5053:5053/udp

[Service]
Restart=always

[Install]
WantedBy=default.target
@@ -3,8 +3,8 @@ Wants=network-online.target
 
 [Container]
 ContainerName=pihole
-Environment=TZ=America/Chicago "WEBPASSWORD=SET A PASSWORD HERE"
-Image=pihole/pihole:latest
+Environment=TZ=America/Chicago
+Image=docker.io/pihole/pihole:2024.05.0
 PublishPort=0.0.0.0:53:53/tcp
 PublishPort=0.0.0.0:53:53/udp
 PublishPort=0.0.0.0:8081:80/tcp
@@ -11,9 +11,15 @@
         owner: root
         group: root
         mode: '0644'
-      with_items:
-        - ../quadlets/iperf3.container
-        - ../quadlets/pihole.container
+      loop:
+        - ./quadlets/iperf3.container
+        - ./quadlets/pihole.container
+        - ./quadlets/cloudflared.container
+    - name: Daemon-reload to trigger re-read of quadlets
+      ansible.builtin.systemd_service:
+        daemon_reload: true
     - name: Restart all quadlet services
       ansible.builtin.systemd_service:
         state: restarted
         name: "{{ item }}"
+      loop: ["pihole", "iperf3", "cloudflared"]
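A hedged follow-up on the podman host:

```bash
# Confirm the regenerated units came back up after the daemon-reload + restart.
systemctl status pihole iperf3 cloudflared --no-pager
```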
@@ -9,28 +9,28 @@
 
 export SERVER=$1
 export FQDN=$2
-export USER=$3
+export KUBE_USER=$3
 
-export CERT_DIR=$HOME/.kube/$FQDN/users/$USER
+export CERT_DIR=$HOME/.kube/$FQDN/users/$KUBE_USER
 export CA_CERT_DIR=$HOME/.kube/$FQDN
 
-export SERVER_USER_DIR="~/.kube/users/$USER"
+export SERVER_USER_DIR="~/.kube/users/$KUBE_USER"
 export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
-export SERVER_USER="$USER-$SERVER_NAME"
+export SERVER_USER="$KUBE_USER-$SERVER_NAME"
 
-export KUBECONFIG="$HOME/.kube/$USER-config"
+export KUBECONFIG="$HOME/.kube/$KUBE_USER-config"
 
-if [ -z $USER ]; then
+if [ -z $KUBE_USER ]; then
   echo "No arguments supplied! Format is ./upsert.sh <SERVER_FQDN> <USER>"
   exit 1
 fi
 
 if [ -z $SERVER ]; then
-  echo "No server supplied for user $USER"
+  echo "No server supplied for user $KUBE_USER"
   exit 1
 fi
 
-if [ $USER = "admin" ]; then
+if [ $KUBE_USER = "admin" ]; then
   echo "Creating admin user for server $SERVER"
 fi
 
@@ -43,24 +43,24 @@ if [ $? -ne 0 ]; then
 fi
 
 echo "Generating openssl cert"
-docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl genrsa -out /$USER/$USER.key 2048
+podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl genrsa -out /$KUBE_USER/$KUBE_USER.key 2048
 
-if [ $USER = "admin" ]; then
-  docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=system:masters"
+if [ $KUBE_USER = "admin" ]; then
+  podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=system:masters"
 else
-  docker run -u $UID -it -v $CERT_DIR:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=user"
+  podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=user"
 fi
 # /CN=admin/O=manager
 
 if [ $? -ne 0 ]; then
-  echo "Couldn't create cert with Docker. Are you sure it's running?"
+  echo "Couldn't create cert with Podman. Are you sure it's running?"
   exit 1
 fi
 
 echo "Creating namespace dir on server"
 ssh $SERVER "mkdir -p $SERVER_USER_DIR"
 echo "Copying client csr to server cert dir"
-scp $CERT_DIR/$USER.csr $SERVER:$SERVER_USER_DIR/$USER.csr
+scp $CERT_DIR/$KUBE_USER.csr $SERVER:$SERVER_USER_DIR/$KUBE_USER.csr
 
 if [ $? -ne 0 ]; then
   echo "Failed to copy client csr to server cert dir"
@@ -87,39 +87,39 @@ exit 1
 fi
 
 echo "Signing cert with pod $CERT_POD"
-ssh $SERVER "kubectl -n kube-system cp $SERVER_USER_DIR/$USER.csr $CERT_POD:/certs/$USER.csr"
-ssh $SERVER "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$USER.crt -days 5000"
-ssh $SERVER "kubectl -n kube-system cp $CERT_POD:/certs/$USER.crt ~/.kube/users/$USER/$USER.crt"
+ssh $SERVER "kubectl -n kube-system cp $SERVER_USER_DIR/$KUBE_USER.csr $CERT_POD:/certs/$KUBE_USER.csr"
+ssh $SERVER "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$KUBE_USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$KUBE_USER.crt -days 5000"
+ssh $SERVER "kubectl -n kube-system cp $CERT_POD:/certs/$KUBE_USER.crt ~/.kube/users/$KUBE_USER/$KUBE_USER.crt"
 echo "retrieving signed cert"
-scp $SERVER:$SERVER_USER_DIR/$USER.crt $CERT_DIR/$USER.crt
+scp $SERVER:$SERVER_USER_DIR/$KUBE_USER.crt $CERT_DIR/$KUBE_USER.crt
 
 echo "retrieving server ca"
 wget --no-check-certificate https://$FQDN:6443/cacerts -O $CA_CERT_DIR/server-ca.pem
 
-echo "creating $FQDN-$USER context"
-kubectl config set-context $FQDN-$USER
+echo "creating $FQDN-$KUBE_USER context"
+kubectl config set-context $FQDN-$KUBE_USER
 
-echo "setting $FQDN-$USER as current context"
-kubectl config set current-context $FQDN-$USER
+echo "setting $FQDN-$KUBE_USER as current context"
+kubectl config set current-context $FQDN-$KUBE_USER
 
-echo "adding server to config with new context $FQDN-$USER"
+echo "adding server to config with new context $FQDN-$KUBE_USER"
 kubectl config set-cluster $FQDN --server=https://$FQDN:6443 --certificate-authority=$CA_CERT_DIR/server-ca.pem
 kubectl config set contexts.$(kubectl config current-context).cluster $FQDN
 
 echo "adding user to config file"
-kubectl config set-credentials $SERVER_USER --client-certificate=$CERT_DIR/$USER.crt --client-key=$CERT_DIR/$USER.key
+kubectl config set-credentials $SERVER_USER --client-certificate=$CERT_DIR/$KUBE_USER.crt --client-key=$CERT_DIR/$KUBE_USER.key
 
 echo "setting user context"
 kubectl config set contexts.$(kubectl config current-context).user $SERVER_USER
 
-if [ $USER = "admin" ]; then
+if [ $KUBE_USER = "admin" ]; then
   echo "Admin user created, skipping namespace"
   echo "export KUBECONFIG=$KUBECONFIG"
   exit 0
 fi
 
 echo "Templating namespace with helm and copying to server"
-helm template $USER --set user=$USER ./helm/namespace | ssh $SERVER "cat - > $SERVER_USER_DIR/namespace.yaml"
+helm template $KUBE_USER --set user=$KUBE_USER ./helm/namespace | ssh $SERVER "cat - > $SERVER_USER_DIR/namespace.yaml"
 
 if [ $? -ne 0 ]; then
   echo "Failed to template namespace. Is helm installed?"
@@ -130,7 +130,7 @@ echo "Creating namespace from template"
 ssh $SERVER "kubectl apply -f $SERVER_USER_DIR/namespace.yaml"
 
 echo "Setting namespace context"
-kubectl config set contexts.$(kubectl config current-context).namespace $USER
+kubectl config set contexts.$(kubectl config current-context).namespace $KUBE_USER
 
 if [ $? -ne 0 ]; then
   echo "Failed to create namespace"
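For context, a hypothetical invocation of the script (argument order per the exports at the top):

```bash
# ./upsert.sh <ssh host of a server node> <cluster FQDN> <kubernetes user>
./upsert.sh node1.reeselink.com kube.reeselink.com alice
```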