ipv6 working

2024-06-09 18:35:56 -04:00
parent 642c0011ff
commit 1f4769fdbe
33 changed files with 487 additions and 207 deletions

View File

@@ -1,5 +1,3 @@
# Ansible
## Install
```bash
@@ -11,11 +9,3 @@ pacman -S ansible ansible-core python-kubernetes
```bash
ansible kubernetes -m ping -i inventory.yaml
```
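A minimal sketch of what `inventory.yaml` could look like for that ping to succeed (group and host names are assumptions based on the reeselink records, not the real inventory):
```yaml
kubernetes:
  hosts:
    node1.reeselink.com:
    node2.reeselink.com:
    node3.reeselink.com:
```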
## Updates
```bash
ansible-playbook -i ansible/inventory.yaml ansible/upgrade-kubernetes-nodes.yaml
ansible-playbook -i ansible/inventory.yaml ansible/upgrade-colors.yaml
ansible-playbook -i ansible/inventory.yaml ansible/upgrade-apt.yaml
```

View File

@@ -1,5 +1,5 @@
image:
tag: 1.21.4
tag: 1.22
ingress:
enabled: true

View File

@@ -1,3 +1,3 @@
jellyfin:
image: jellyfin/jellyfin:latest
image: jellyfin/jellyfin:10
domain: jellyfin.reeseapps.com

View File

@@ -14,3 +14,21 @@ spec:
targetPort: 25565
name: {{ .Release.Name }}
type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}v6
annotations:
metallb.universe.tf/address-pool: "productionv6"
spec:
externalTrafficPolicy: Cluster
selector:
app: {{ .Release.Name }}
ports:
- port: {{ .Values.port }}
targetPort: 25565
name: {{ .Release.Name }}
type: LoadBalancer
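Once the chart is upgraded, the second Service should pull an address from the `productionv6` pool; a quick check (namespace omitted, adjust as needed):
```bash
# The new *v6 Service should show an IPv6 EXTERNAL-IP from the productionv6 range
kubectl get svc --all-namespaces -o wide | grep v6
```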

View File

@@ -1,5 +1,5 @@
nextcloud:
image: nextcloud:27.1.9
image: nextcloud:27
domain: nextcloud.reeseapps.com
html:
storageClassName: zfs-iscsi-enc1

View File

@@ -17,3 +17,14 @@ metadata:
spec:
addresses:
- 10.1.2.101/32
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: productionv6
namespace: metallb
spec:
addresses:
- 2600:1700:1e6c:a81f:aaaa::1/64

View File

@@ -17,3 +17,14 @@ metadata:
spec:
ipAddressPools:
- nginx
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: productionv6
namespace: metallb
spec:
ipAddressPools:
- productionv6
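After applying both manifests, verify the pool and its advertisement exist (names match the resources above):
```bash
kubectl get ipaddresspools,l2advertisements -n metallb
```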

36
network/README.md Normal file
View File

@@ -0,0 +1,36 @@
# Network Management
- [Network Management](#network-management)
- [Route53](#route53)
- [Reeselink Addresses](#reeselink-addresses)
- [Reeseapps Addresses](#reeseapps-addresses)
## Route53
```bash
aws route53 list-hosted-zones
# reeselink
aws route53 change-resource-record-sets --hosted-zone-id Z0092652G7L97DSINN18 --change-batch file://
# reeseapps
aws route53 change-resource-record-sets --hosted-zone-id Z012820733346FJ0U4FUF --change-batch file://
```
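To confirm what a zone serves after an upsert, list the record sets back (zone ID from above; the `--query` filter is optional):
```bash
aws route53 list-resource-record-sets --hosted-zone-id Z0092652G7L97DSINN18 \
  --query "ResourceRecordSets[?Type=='AAAA']"
```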
## Reeselink Addresses
These are convenience DNS records (both IPv4 and IPv6) so you don't have to remember every IP address.
```bash
aws route53 change-resource-record-sets --hosted-zone-id Z0092652G7L97DSINN18 --change-batch file://network/reeselink.json
```
You can extract these addresses into a text file with:
```bash
cat network/reeselink.json | jq -c -r '[ .Changes.[] | select( .ResourceRecordSet.Type | contains("AAAA")) ] | .[] | .ResourceRecordSet | .Name,.ResourceRecords.[].Value' > network/ipv6.txt
```
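A similar filter over the A records can produce an IPv4 list (a sketch assuming the same change-batch layout; the checked-in `network/ipv4.txt` may have been generated from a different source):
```bash
jq -r '[ .Changes[] | select(.ResourceRecordSet.Type == "A") ] | .[] | .ResourceRecordSet | .Name, .ResourceRecords[].Value' \
  network/reeselink.json > network/ipv4-hosts.txt
```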
## Reeseapps Addresses
```bash
aws route53 change-resource-record-sets --hosted-zone-id Z012820733346FJ0U4FUF --change-batch file://network/reeseapps.json
```

14
network/ipv4.txt Normal file
View File

@@ -0,0 +1,14 @@
Shelly
10.1.197.88
10.1.172.63
10.1.248.70
10.1.142.201
10.1.163.235
10.1.129.124
10.1.195.60
10.1.157.209
10.1.93.31
10.1.223.134
10.1.189.8
10.1.205.155
10.1.96.48

18
network/ipv6.txt Normal file
View File

@@ -0,0 +1,18 @@
driveripper.reeselink.com
2600:1700:1e6c:a81f:94bb:b8ff:fe9f:1c63
yellow.reeselink.com
2600:1700:1e6c:a81f:793d:7abf:e94d:9bc4
orange.reeselink.com
2600:1700:1e6c:a81f:153e:9c35:8ff3:fa3
node1.reeselink.com
2600:1700:1e6c:a81f:2a0:98ff:fe6c:eca7
node2.reeselink.com
2600:1700:1e6c:a81f:2a0:98ff:fe47:6498
node3.reeselink.com
2600:1700:1e6c:a81f:2a0:98ff:fe0f:aba3
homeassistant.reeselink.com
2600:1700:1e6c:a81f:19:a563:8600:2db6
nextcloud-aio.reeselink.com
2600:1700:1e6c:a81f:5054:ff:fe03:880
unifi-external.reeselink.com
2600:1700:1e6c:a81f:5054:ff:fea0:200c

21
network/reeseapps.json Normal file
View File

@@ -0,0 +1,21 @@
{
"Comment": "CREATE/UPSERT/DELETE a record ",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "*.reeseapps.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:793d:7abf:e94d:9bc4"
},
{
"Value": "2600:1700:1e6c:a81f:153e:9c35:8ff3:fa3"
}
]
}
}
]
}

239
network/reeselink.json Normal file
View File

@@ -0,0 +1,239 @@
{
"Comment": "CREATE/UPSERT/DELETE a record ",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "driveripper.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:94bb:b8ff:fe9f:1c63"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "driveripper.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.2.10"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "yellow.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:793d:7abf:e94d:9bc4"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "yellow.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.203.197"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "orange.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:153e:9c35:8ff3:fa3"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "orange.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.200.253"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node1.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe6c:eca7"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node1.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.2.13"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node2.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe47:6498"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node2.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.2.14"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node3.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe0f:aba3"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "node3.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.2.15"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "homeassistant.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:19:a563:8600:2db6"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "homeassistant.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.27.89"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "nextcloud-aio.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:5054:ff:fe03:880"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "nextcloud-aio.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.175.237"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "unifi-external.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:5054:ff:fea0:200c"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "unifi-external.reeselink.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": "10.1.241.139"
}
]
}
}
]
}

View File

@@ -12,10 +12,11 @@ ansible-playbook -i ansible/inventory.yaml nginx/nginx.yaml
## Restricted Addresses
We can detect whether traffic is originating internally or externally by checking if
it came in on port 443 or 444.
We'll use nginx basic auth to protect our internal addresses.
External traffic always comes in through 444.
```bash
htpasswd -c secrets/.htpasswd ducoterra
```
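To add or inspect users later without clobbering the file, omit `-c` (htpasswd updates the file in place):
```bash
htpasswd secrets/.htpasswd another-user   # add/update a second user
cut -d: -f1 secrets/.htpasswd             # list existing users
```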
## Certbot

View File

@@ -13,15 +13,9 @@
name:
- certbot
state: present
- name: Get certs for all internal domains
ansible.builtin.shell: /usr/bin/certbot certonly --dns-route53 -d '{{ item.external.domain }}{{ internal_tld }}' -n
# Loops over every external.domains sub list
loop: "{{ http }}"
- name: Get certs for all external domains
ansible.builtin.shell: /usr/bin/certbot certonly --dns-route53 -d '{{ item.external.domain }}{{ expose_tld }}' -n
# Loops over every external.domains sub list
loop: "{{ http }}"
when: item.external.expose
- name: Create certbot renew service
template:
src: service/certbot-renew.service

View File

@@ -1,4 +1,3 @@
# Internal Server
server {
access_log /var/log/nginx/nginx_https_access.log basic;
error_log /var/log/nginx/nginx_https_error.log warn;
@@ -12,109 +11,29 @@ server {
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
# Listen for the default http internal ports
listen 127.0.0.1:{{ defaults.http.internal_http_port }} proxy_protocol;
listen 127.0.0.1:{{ defaults.http.internal_https_port }} ssl proxy_protocol;
# Listen for any extra http ports specified by the user
{% for port in item.external.extra_http_ports %}
listen 127.0.0.1:{{ port }} proxy_protocol;
{% endfor %}
# Listen for any extra https ports specified by the user
{% for port in item.external.extra_https_ports %}
listen 127.0.0.1:{{ port }} ssl proxy_protocol;
{% endfor %}
listen unix:/var/lib/nginx/tmp/nginx_http.sock proxy_protocol;
listen unix:/var/lib/nginx/tmp/nginx_https.sock ssl proxy_protocol;
if ($scheme = "http") {
return 301 https://$host:443$request_uri;
}
set_real_ip_from 127.0.0.1;
set_real_ip_from unix:;
set_real_ip_from {{ internal_ipv4 }};
set_real_ip_from {{ internal_ipv6 }};
real_ip_header X-Real-IP;
real_ip_recursive on;
server_name {{ item.external.domain }}{{ internal_tld }};
location / {
{% for port in item.external.extra_http_ports %}
if ($server_port = "{{ port }}") {
proxy_pass {{ item.internal.protocol }}://{{ item.internal.ip }}:{{ port }}$request_uri;
}
{% endfor %}
{% for port in item.external.extra_https_ports %}
if ($server_port = "{{ port }}") {
proxy_pass {{ item.internal.protocol }}://{{ item.internal.ip }}:{{ port }}$request_uri;
}
{% endfor %}
if ($server_port = "{{ defaults.http.internal_https_port }}"){
proxy_pass {{ item.internal.protocol }}://{{ item.internal.ip }}:{{ item.internal.port }}$request_uri;
}
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Accept-Encoding "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_set_header X-Forwarded-For $proxy_protocol_addr;
client_body_buffer_size 512k;
proxy_read_timeout 86400s;
client_max_body_size 0;
# Websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
ssl_certificate /etc/letsencrypt/live/{{ item.external.domain }}{{ internal_tld }}/fullchain.pem; # managed by certbot on host machine
ssl_certificate_key /etc/letsencrypt/live/{{ item.external.domain }}{{ internal_tld }}/privkey.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers EECDH+CHACHA20:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/letsencrypt/live/{{ item.external.domain }}{{ internal_tld }}/fullchain.pem;
# replace with the IP address of your resolver
resolver 127.0.0.1;
}
# External Server
{% if item.external.expose %}
server {
access_log /var/log/nginx/nginx_https_access.log basic;
error_log /var/log/nginx/nginx_https_error.log warn;
http2 on;
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
listen 127.0.0.1:{{ defaults.http.external_http_port }} proxy_protocol;
listen 127.0.0.1:{{ defaults.http.external_https_port }} ssl proxy_protocol;
if ($scheme = "http") {
return 301 https://$host:443$request_uri;
}
set_real_ip_from 127.0.0.1;
server_name {{ item.external.domain }}{{ expose_tld }};
location / {
{% if item.external.protect %}
auth_basic "Restricted";
auth_basic_user_file /etc/nginx/.htpasswd;
{% endif %}
proxy_pass {{ item.internal.protocol }}://{{ item.internal.ip }}:{{ item.internal.port }}$request_uri;
proxy_set_header X-Forwarded-Port $server_port;
@@ -155,4 +74,3 @@ server {
# replace with the IP address of your resolver
resolver 127.0.0.1;
}
{%- endif %}

View File

@@ -27,33 +27,32 @@ events {
}
stream {
log_format basic '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward: $map_forward_ip:$upstream_port | Upstream Addr: $upstream_addr | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';
log_format basic '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward: $map_forward_ip$upstream_port | Upstream Addr: $upstream_addr | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';
# Map all SSL parsed server names to hosts
map $ssl_preread_server_name $map_forward_ip {
# Empty ssl preread gets forwarded to internal
"" 127.0.0.1;
# Empty ssl preread gets forwarded to internal http server
"" "unix:/var/lib/nginx/tmp/nginx_http.sock";
# These domains will get forwarded to the internal http server
# These domains will get forwarded to the internal https server
{% for item in http %}
{{ item.external.domain }}{{ internal_tld }} 127.0.0.1;
{% if item.external.expose %}
{{ item.external.domain }}{{ expose_tld }} 127.0.0.1;
{% endif %}
{{ item.external.domain }}{{ expose_tld }} unix:/var/lib/nginx/tmp/nginx_https.sock;
{% endfor %}
# By default forward to our internal nginx server (probably kubernetes)
default {{ defaults.forward_ip }};
}
# Since external traffic will be coming in on port 444, and we need to get some of that traffic
# to kubernetes ingress-nginx on port 443, we need to detect if the destination IP is kubernetes.
# If it is, forward that traffic to port 443. Otherwise, preserve the original port the traffic
# came in on.
# Since traffic forwarded to a unix socket doesn't need a port, we'll create
# a map here to enforce that case.
map $map_forward_ip $upstream_port {
{{ defaults.forward_ip }} 443;
default $server_port;
{{ defaults.forward_ip }} ":443";
"unix:/var/lib/nginx/tmp/nginx_http.sock" "";
"unix:/var/lib/nginx/tmp/nginx_https.sock" "";
default ":$server_port";
}
server {
@@ -64,17 +63,17 @@ stream {
# The default http ports
{% for port in defaults.listen_ports %}
listen {{ ansible_default_ipv4.address }}:{{ port }};
listen [{{ ansible_default_ipv6.address }}]:{{ port }};
listen {{ port }};
listen [::]:{{ port }};
{% endfor %}
# Any unique ports listed in the extra_ports field
{% for port in unique_ports %}
listen {{ ansible_default_ipv4.address }}:{{ port }};
listen [{{ ansible_default_ipv6.address }}]:{{ port }};
listen {{ port }};
listen [::]:{{ port }};
{% endfor %}
proxy_pass $map_forward_ip:$upstream_port;
proxy_pass $map_forward_ip$upstream_port;
ssl_preread on;
proxy_socket_keepalive on;
}
@@ -83,33 +82,30 @@ stream {
}
http {
log_format basic '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Forward: $map_forward_ip:$server_port | Referer: $http_referer | $request | $time_local | $status |';
log_format basic '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Forward: $map_forward_ip$server_port | Referer: $http_referer | $request | $time_local | $status |';
map $host $map_forward_ip {
"" "127.0.0.1";
"" "unix:/var/lib/nginx/tmp/nginx_http.sock";
# We don't want to forward traffic we're terminating
# Rather we'll catch it here and redirect to 443.
{% for item in http %}
{{ item.external.domain }}{{ internal_tld }} "127.0.0.1";
{% if item.external.expose %}
{{ item.external.domain }}{{ expose_tld }} "127.0.0.1";
{% endif %}
{{ item.external.domain }}{{ expose_tld }} "unix:/var/lib/nginx/tmp/nginx_https.sock";
{% endfor %}
default {{ defaults.forward_ip }};
}
# Internal requests come through 80
# Handle internal http requests through unix:/var/lib/nginx/tmp/nginx_http.sock
server {
access_log /var/log/nginx/nginx_http_access.log basic;
error_log /var/log/nginx/nginx_http_error.log warn;
listen 127.0.0.1:80 default_server proxy_protocol;
listen unix:/var/lib/nginx/tmp/nginx_http.sock default_server proxy_protocol;
location / {
# If we have an external forward IP, forward traffic
if ($map_forward_ip != "127.0.0.1") {
if ($map_forward_ip != "unix:/var/lib/nginx/tmp/nginx_http.sock") {
proxy_pass $map_forward_ip:80;
}
# Else redirect if the scheme is http
@@ -119,25 +115,6 @@ http {
}
}
# External requests come through 81
server {
access_log /var/log/nginx/nginx_http_access.log basic;
error_log /var/log/nginx/nginx_http_error.log warn;
listen 127.0.0.1:81 default_server proxy_protocol;
location / {
# If we have a forward IP, forward the traffic
if ($map_forward_ip) {
proxy_pass $map_forward_ip:81;
}
# Else redirect if the scheme is http
if ($scheme = "http") {
return 301 https://$host:443$request_uri;
}
}
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;

View File

@@ -37,6 +37,13 @@
owner: root
group: root
mode: '0644'
- name: Copy .htpasswd
template:
src: ../secrets/.htpasswd
dest: /etc/nginx/.htpasswd
owner: nginx
group: nginx
mode: '0600'
- name: Copy stream configurations
template:
src: "{{ item }}"
@@ -56,8 +63,20 @@
loop: "{{ http }}"
- name: Test nginx configuration
ansible.builtin.shell: /usr/sbin/nginx -t
- name: Stop nginx service
ansible.builtin.systemd_service:
state: stopped
name: nginx
- name: Remove old socket files
file:
path: /var/lib/nginx/tmp/nginx_http.sock
state: absent
- name: Remove old socket files
file:
path: /var/lib/nginx/tmp/nginx_https.sock
state: absent
- name: Reload nginx service
ansible.builtin.systemd_service:
state: restarted
state: started
name: nginx
enabled: true
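After the stop/clean/start sequence it's worth confirming nginx actually came back up and recreated its sockets (run on the proxy host):
```bash
systemctl status nginx --no-pager
ls -l /var/lib/nginx/tmp/nginx_http.sock /var/lib/nginx/tmp/nginx_https.sock
```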

View File

@@ -2,7 +2,6 @@ server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen {{ ansible_default_ipv4.address }}:2222;
listen [{{ ansible_default_ipv6.address }}]:2222;
listen 2222;
proxy_pass 10.1.2.100:2222;
}

View File

@@ -2,9 +2,7 @@ server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen {{ ansible_default_ipv4.address }}:5201;
listen {{ ansible_default_ipv4.address }}:5201 udp;
listen [{{ ansible_default_ipv6.address }}]:5201;
listen [{{ ansible_default_ipv6.address }}]:5201 udp;
proxy_pass 127.0.0.1:5201;
listen 5201;
listen 5201 udp;
proxy_pass 127.0.0.1:5202;
}

View File

@@ -8,7 +8,6 @@ server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen {{ ansible_default_ipv4.address }}:6443;
listen [{{ ansible_default_ipv6.address }}]:6443;
listen 6443;
proxy_pass kube_backend;
}

View File

@@ -2,7 +2,6 @@ server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen {{ ansible_default_ipv4.address }}:25565-25575;
listen [{{ ansible_default_ipv6.address }}]:25565-25575;
listen 25565-25575;
proxy_pass 10.1.2.100:$server_port;
}

View File

@@ -4,94 +4,98 @@ defaults:
listen_ports:
- 443
- 80
- 444
- 81
http:
internal_http_port: 80
internal_https_port: 443
external_http_port: 81
external_https_port: 444
internal_ip: 10.1.0.0/16
internal_ipv4: 10.1.0.0/16
internal_ipv6: 2600:1700:1e6c:a81f::/64
expose_tld: .reeseapps.com
internal_tld: .reeselink.com
http:
- external:
domain: homeassistant
expose: true
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.2.131.2"
ip: "10.1.27.89"
port: 8123
protocol: https
- external:
domain: driveripper
expose: true
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.2.10"
port: 8443
protocol: https
- external:
domain: e3s1plus
expose: false
protect: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.2.224.77"
port: 80
protocol: http
- external:
domain: yellow
expose: false
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.203.197"
port: 9090
protocol: https
- external:
domain: orange
expose: false
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.200.253"
port: 9090
protocol: https
- external:
domain: node1
expose: false
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.2.13"
port: 9090
protocol: https
- external:
domain: node2
expose: false
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.2.14"
port: 9090
protocol: https
- external:
domain: node3
expose: false
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.2.15"
port: 9090
protocol: https
# Printer
- external:
domain: cr10se
expose: false
protect: true
extra_http_ports: []
extra_https_ports:
# websocket
@@ -102,45 +106,50 @@ http:
ip: "10.3.165.70"
port: 80
protocol: http
- external:
domain: pihole-yellow
expose: false
protect: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.203.197"
port: 8081
protocol: http
- external:
domain: pihole-orange
expose: false
protect: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.200.253"
port: 8081
protocol: http
- external:
domain: attmodem
expose: false
protect: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: 192.168.1.254
port: 80
protocol: http
- external:
domain: nextcloud-aio
expose: true
protect: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: 10.1.175.237
port: 11000
protocol: http
- external:
domain: unifi-external
expose: true
protect: false
extra_http_ports:
- 8080
extra_https_ports: []

View File

@@ -38,8 +38,8 @@ Copy these files to `/usr/share/containers/systemd/`
```bash
podman run \
-v ./compose:/compose \
-v ./quadlets:/quadlets \
-v ./podman/compose:/compose \
-v ./podman/quadlets:/quadlets \
quay.io/k9withabone/podlet \
-f /quadlets \
-i \

View File

@@ -7,6 +7,6 @@ services:
image: docker.io/cloudflare/cloudflared:2024.5.0
command: proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
ports:
- "0.0.0.0:5053:5053/tcp"
- "0.0.0.0:5053:5053/udp"
- "127.0.0.1:5053:5053/tcp"
- "127.0.0.1:5053:5053/udp"
restart: unless-stopped
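With the port now bound to loopback only, the DNS proxy can still be checked from the host itself (assumes `dig` is installed):
```bash
dig @127.0.0.1 -p 5053 reeseapps.com +short
```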

View File

@@ -5,6 +5,6 @@ services:
container_name: iperf3
image: docker.io/networkstatic/iperf3:latest
ports:
- "127.0.0.1:5201:5201/tcp"
- "127.0.0.1:5202:5201/tcp"
command: -s
restart: unless-stopped
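The nginx stream config above forwards 5201 to 127.0.0.1:5202, so an end-to-end test goes through the proxy (replace the hostname with whichever node runs nginx):
```bash
iperf3 -c yellow.reeselink.com -p 5201
```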

View File

@@ -8,7 +8,7 @@ services:
ports:
- "0.0.0.0:53:53/tcp"
- "0.0.0.0:53:53/udp"
- "0.0.0.0:8081:80/tcp"
- "127.0.0.1:8081:80/tcp"
environment:
TZ: "America/Chicago"
# WEBPASSWORD: "SET A PASSWORD HERE"
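A quick host-side check that the admin UI no longer listens on every interface (assumes iproute2's `ss`):
```bash
ss -tlnp | grep 8081
# expect only 127.0.0.1:8081
```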

View File

@@ -5,8 +5,8 @@ Wants=network-online.target
ContainerName=cloudflared
Exec=proxy-dns --address 0.0.0.0 --port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
Image=docker.io/cloudflare/cloudflared:2024.5.0
PublishPort=0.0.0.0:5053:5053/tcp
PublishPort=0.0.0.0:5053:5053/udp
PublishPort=127.0.0.1:5053:5053/tcp
PublishPort=127.0.0.1:5053:5053/udp
[Service]
Restart=always

View File

@@ -2,7 +2,7 @@
ContainerName=iperf3
Exec=-s
Image=docker.io/networkstatic/iperf3:latest
PublishPort=127.0.0.1:5201:5201/tcp
PublishPort=127.0.0.1:5202:5201/tcp
[Service]
Restart=always

View File

@@ -7,7 +7,7 @@ Environment=TZ=America/Chicago
Image=docker.io/pihole/pihole:2024.05.0
PublishPort=0.0.0.0:53:53/tcp
PublishPort=0.0.0.0:53:53/udp
PublishPort=0.0.0.0:8081:80/tcp
PublishPort=127.0.0.1:8081:80/tcp
Volume=pihole:/etc/pihole
Volume=dnsmasq:/etc/dnsmasq.d

9
updates/README.md Normal file
View File

@@ -0,0 +1,9 @@
# Update and Upgrade Procedures
## Updates
```bash
ansible-playbook -i ansible/inventory.yaml updates/upgrade-kubernetes-nodes.yaml
ansible-playbook -i ansible/inventory.yaml updates/upgrade-colors.yaml
ansible-playbook -i ansible/inventory.yaml updates/upgrade-apt.yaml
```