spectrum ipv6 vlan migration

This commit is contained in:
2024-07-19 10:12:06 -04:00
parent cecd234160
commit 697e08ed78
33 changed files with 160 additions and 933 deletions

13
3d_printing/README.md Normal file
View File

@@ -0,0 +1,13 @@
# 3D Printing Notes
## Bambu P1S
### WiFi
If you want to set up WiFi without logging in, follow this guide:
<https://wiki.bambulab.com/en/p1/manual/p1-sd-card-network-configuration-guide>
### Connecting via LAN mode
- Make sure you open ports 1990 and 2021 in your firewall application.

View File

@@ -78,6 +78,19 @@
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "unifi.reeseapps.com",
"Type": "A",
"TTL": 300,
"ResourceRecords": [
{
"Value": ""
}
]
}
}
]
}

View File

@@ -9,7 +9,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe14:1bbd"
"Value": "2603:6013:3140:100:2a0:98ff:fe14:1bbd"
}
]
}
@@ -22,7 +22,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:42:acff:fe1e:2101"
"Value": "2603:6013:3140:100:42:acff:fe1e:2101"
}
]
}
@@ -35,7 +35,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe5e:edc3"
"Value": "2603:6013:3140:100:2a0:98ff:fe5e:edc3"
}
]
}

View File

@@ -9,7 +9,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe39:9b5"
"Value": "2603:6013:3140:100:2a0:98ff:fe39:9b5"
}
]
}
@@ -22,7 +22,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe14:1bbd"
"Value": "2603:6013:3140:100:2a0:98ff:fe14:1bbd"
}
]
}
@@ -35,7 +35,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:42:acff:fe1e:2101"
"Value": "2603:6013:3140:100:42:acff:fe1e:2101"
}
]
}
@@ -48,7 +48,7 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:2a0:98ff:fe5e:edc3"
"Value": "2603:6013:3140:100:2a0:98ff:fe5e:edc3"
}
]
}
@@ -61,7 +61,46 @@
"TTL": 300,
"ResourceRecords": [
{
"Value": "2600:1700:1e6c:a81f:94bb:b8ff:fe9f:1c63"
"Value": "2603:6013:3140:100:94bb:b8ff:fe9f:1c63"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "pivpn.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2603:6013:3140:100:dea6:32ff:fe05:1722"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "yellow.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2603:6013:3140:100:664b:f0ff:fe14:dbd"
}
]
}
},
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": "gamebox.reeselink.com",
"Type": "AAAA",
"TTL": 300,
"ResourceRecords": [
{
"Value": "2603:6013:3140:103:7656:3cff:febd:1df8"
}
]
}

View File

@@ -49,15 +49,12 @@ spec:
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
# image: nginx
args:
- --source=service
- --source=ingress
- --domain-filter=reeseapps.com # will make ExternalDNS see only the hosted zones matching provided domain, omit to process all available hosted zones
- --domain-filter=reeselink.com
- --domain-filter=reeseapps.com
- --provider=aws
# - --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
- --aws-zone-type=public # only look at public hosted zones (valid values are public, private or no value for both)
- --aws-zone-type=public
- --registry=txt
- --txt-owner-id=external-dns
env:

View File

@@ -3,9 +3,9 @@ image:
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
apiVersion: networking.k8s.io/v1
@@ -63,7 +63,7 @@ service:
ipFamilies: ["IPv6"]
annotations:
metallb.universe.tf/address-pool: "external"
external-dns.alpha.kubernetes.io/hostname: git.reeseapps.com,git.reeselink.com
external-dns.alpha.kubernetes.io/hostname: git.reeseapps.com
redis-cluster:

View File

@@ -4,10 +4,10 @@ controller:
annotations:
metallb.universe.tf/address-pool: "external"
metallb.universe.tf/allow-shared-ip: nginx
external-dns.alpha.kubernetes.io/hostname: ingress-nginx.reeselink.com
external-dns.alpha.kubernetes.io/hostname: ingress-nginx.reeseapps.com
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv6
config:
log-format-upstream: '| Proxy Proto Addr: $proxy_protocol_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Referer: $http_referer | $request | $time_local | $status |'
log-format-upstream: '| Remote Addr: $remote_addr:$server_port | Host: $host | Referer: $http_referer | $request | $time_local | $status |'
allowSnippetAnnotations: true

View File

@@ -9,6 +9,8 @@
- [Storage](#storage)
- [Coredns](#coredns)
- [Metal LB](#metal-lb)
- [VLAN Setup](#vlan-setup)
- [Installation](#installation)
- [External DNS](#external-dns)
- [Credentials](#credentials)
- [Annotation](#annotation)
@@ -131,6 +133,25 @@ kubectl run -it --rm \
## Metal LB
### VLAN Setup
Before working with Metallb you'll need at least one available VLAN. On Unifi equipment
this is accomplished by creating a new network. Don't assign it to anything.
On the linux machine you can use nmcli or cockpit to configure a new VLAN network interface.
With cockpit:
1. Add a new VLAN network
2. The parent should be the physical adapter connected to your switch
3. Set the VLAN ID to the VLAN number of your created unifi network
4. Click create
5. Click into the new network
6. Turn off IPv4 and IPv6 DNS (otherwise the extra resolvers will exceed the resolv.conf nameserver limit)
7. Turn on the network interface
8. Attempt to ping the acquired address(es)
### Installation
We'll be swapping K3S's default load balancer with Metal LB for more flexibility. ServiceLB was
struggling to allocate IP addresses for load balanced services. MetalLB does make things a little
more complicated — you'll need special annotations (see below) — but it's otherwise a well-tested,

View File

@@ -1,127 +0,0 @@
# Service Mesh
This will be handled by wireguard. The goal is to establish encrypted communication between
hosts for iscsi/nfs/http services.
## Install Wireguard
<https://www.wireguard.com/install/>
## Ansible
```bash
ansible-playbook -i ansible/inventory.yaml mesh/keys.yaml
ansible-playbook -i ansible/inventory.yaml mesh/interface.yaml
ansible-playbook -i ansible/inventory.yaml mesh/peers.yaml
```
## DNS Records
Collect DNS records from vars.yaml
```bash
cat mesh/vars.yaml | \
yq -r '.ip | map([.hostname + "-wg.reeselink.com", .address]).[].[]' > dns/duconet-wg.txt
```
## CLI Setup
```bash
# Peer 1
wg genkey | tee /etc/wireguard/privatekey | wg pubkey > /etc/wireguard/publickey
ip link add dev duconet-wg type wireguard
ip address add dev duconet-wg fd00:fd41:d0f1:1010::0/64
wg set duconet-wg \
listen-port 51821 \
private-key /etc/wireguard/privatekey
wg set duconet-wg \
peer CQxNsdPgfzjvOszjn/UZHFdAY3k+D9J+vI8qKUjCYV0= \
allowed-ips '10.10.10.0/24' \
endpoint 10.1.200.253:51821
ip link set up dev duconet-wg
touch /etc/wireguard/duconet-wg.conf
wg-quick save duconet-wg
# Peer 2
wg genkey | tee /etc/wireguard/privatekey | wg pubkey > /etc/wireguard/publickey
ip link add dev duconet-wg type wireguard
ip address add dev duconet-wg 10.10.10.2/24
wg set duconet-wg \
listen-port 51821 \
private-key /etc/wireguard/privatekey \
peer kzbHUGzYk6Uyan/NFYY5mh3pxf2IX/WzWZtImeyp6Sw= \
allowed-ips '10.10.10.0/24' \
endpoint 10.1.203.197:51821
ip link set up dev duconet-wg
touch /etc/wireguard/duconet-wg.conf
wg-quick save duconet-wg
# Peer 3
wg genkey | tee /etc/wireguard/privatekey | wg pubkey > /etc/wireguard/publickey
ip link add dev duconet-wg type wireguard
ip address add dev duconet-wg 10.10.10.3/24
wg set duconet-wg \
listen-port 51821 \
private-key /etc/wireguard/privatekey \
peer kzbHUGzYk6Uyan/NFYY5mh3pxf2IX/WzWZtImeyp6Sw= \
allowed-ips '10.10.10.0/24' \
endpoint 10.1.203.197:51821
wg set duconet-wg \
peer 9/dBUlO9TGf0H9M3xwPiuIuz6Q/u7fSJVZaUxqAiqi8= \
allowed-ips '10.10.10.0/24' \
endpoint 10.1.2.10:51821
ip link set up dev duconet-wg
touch /etc/wireguard/duconet-wg.conf
wg-quick save duconet-wg
```
## Teardown
```bash
# teardown
ip link delete duconet-wg
systemctl disable wg-quick@duconet-wg
```
## Truenas
Because truenas's /etc/wireguard is ephemeral we need to create scripts to save and load
our wireguard config at shutdown/boot.
Select these scripts in system settings -> advanced -> init/shutdown scripts
Startup Script:
/mnt/enc1/truenas/wireguard/duconet-save.sh
```bash
#!/bin/bash
cp -a /mnt/enc1/truenas/wireguard/* /etc/wireguard/
wg-quick up duconet-wg
```
Shutdown Script:
/mnt/enc1/truenas/wireguard/duconet-load.sh
```bash
#!/bin/bash
wg-quick save duconet-wg
cp -a /etc/wireguard/* /mnt/enc1/truenas/wireguard/
```

View File

@@ -1,41 +0,0 @@
- name: Configure Wireguard Network Link
hosts:
- colors
- kubernetes
- managed
become: true
become_user: root
become_method: sudo
vars_files:
- vars.yaml
tasks:
- name: Check if {{ wireguard.interface }} exists
shell: ip link show {{ wireguard.interface }}
register: link_check
ignore_errors: yes
- name: Add {{ wireguard.interface }} link
shell: ip link add dev {{ wireguard.interface }} type wireguard
when: link_check.rc != 0
- name: Add {{ wireguard.interface }} ipv6 addresses
shell: "ip address add dev {{ wireguard.interface }} {{ ip[inventory_hostname].address_ipv6 }}/64"
ignore_errors: yes
- name: Add {{ wireguard.interface }} ipv4 addresses
shell: "ip address add dev {{ wireguard.interface }} {{ ip[inventory_hostname].address_ipv4 }}/24"
ignore_errors: yes
- name: wg set port/key
shell: >
wg set {{ wireguard.interface }}
listen-port {{ wireguard.listen_port }}
private-key /etc/wireguard/privatekey
- name: Set link up
shell: ip link set up dev {{ wireguard.interface }}
- name: Touch {{ wireguard.interface }}.conf
ansible.builtin.file:
path: /etc/wireguard/{{ wireguard.interface }}.conf
state: touch
- name: save wg config
shell: wg-quick save {{ wireguard.interface }}
- name: Enable wg-quick@{{ wireguard.interface }}
ansible.builtin.systemd_service:
name: wg-quick@{{ wireguard.interface }}
enabled: true

View File

@@ -1,26 +0,0 @@
- name: Update nginx stream configuration
hosts:
- colors
- kubernetes
- managed
become: true
become_user: root
become_method: sudo
tasks:
- name: Ensure wireguard directory exists
ansible.builtin.file:
path: /etc/wireguard
state: directory
mode: '0700'
- name: Check if privatekey exists
stat: path=/etc/wireguard/privatekey
register: key
- name: Generate pubkey and privatekey
shell: wg genkey | tee /etc/wireguard/privatekey | wg pubkey > /etc/wireguard/publickey
when: not key.stat.exists or key.stat.size == 0
- name: cat pubkey
command: cat /etc/wireguard/publickey
register: pubkey
- name: Print publickey to console
debug:
msg: "{{pubkey.stdout}}"

View File

@@ -1,34 +0,0 @@
- name: Add wireguard peers to each server
hosts:
- colors
- kubernetes
- managed
become: true
become_user: root
become_method: sudo
vars_files:
- vars.yaml
tasks:
- name: delete unused peers
shell: wg set {{ wireguard.interface }} peer {{ item }} remove
loop:
- "CQxNsdPgfzjvOszjn/UZHFdAY3k+D9J+vI8qKUjCYV0="
- name: wg set peers
shell: >
wg set {{ wireguard.interface }}
peer {{ item.public_key }}
allowed-ips '{{ ip[item.name].address_ipv6 }},{{ ip[item.name].address_ipv4 }}'
persistent-keepalive 5
{% if item.endpoint %}
endpoint '{{ item.endpoint }}'
{% endif %}
loop: "{{ peers }}"
- name: wg delete peers
shell: >
wg set {{ wireguard.interface }}
peer {{ item }} remove
loop:
- 9/dBUlO9TGf0H9M3xwPiuIuz6Q/u7fSJVZaUxqAiqi8=
ignore_errors: yes
- name: save wg config
shell: wg-quick save {{ wireguard.interface }}

View File

@@ -1,61 +0,0 @@
wireguard:
listen_port: 51821
interface: duconet-wg
peers:
- name: yellow
public_key: kzbHUGzYk6Uyan/NFYY5mh3pxf2IX/WzWZtImeyp6Sw=
endpoint: yellow.reeselink.com:51821
- name: node1
public_key: 1K3CszRSSnUSWpgL7q57+LTgOEbIt8TonSK1gV/JnXE=
endpoint: node1.reeselink.com:51821
- name: node2
public_key: /7IGSgTEPh+lGYtkMUME2+0XlZEz1ILLd8J0oIxgnjA=
endpoint: node2.reeselink.com:51821
- name: node3
public_key: BwLY8W9nUCpF2xpLlvbkPkwQDV1Kqe+afCINXjEhQnY=
endpoint: node3.reeselink.com:51821
- name: driveripper
public_key: o7alrWFIMHZyeMNJDotj7Aa8ggAZ3xxcMehVnjCJjmA=
endpoint: driveripper.reeselink.com:51821
- name: unifi-external
public_key: UdbGYnVoxv9J7iv98EJ5hRfjlvPvHENsUqNJQADRHQI=
endpoint: unifi-external.reeselink.com:51821
- name: nextcloud-aio
public_key: G4L1WGm9nIwaw2p6oZqT4W7+ekoziCePrjI8AFwXHTw=
endpoint: nextcloud-aio.reeselink.com:51821
- name: pivpn
public_key: mhrhD+orgevCKJyf28KMvzHGy+0LAmNomAN1XcwjrUI=
endpoint: pivpn.reeselink.com:51821
ip:
yellow:
address_ipv6: fd00:fd41:d0f1:1010::1
address_ipv4: 10.180.238.1
hostname: yellow
node1:
address_ipv6: fd00:fd41:d0f1:1010::3
address_ipv4: 10.180.238.3
hostname: node1
node2:
address_ipv6: fd00:fd41:d0f1:1010::4
address_ipv4: 10.180.238.4
hostname: node2
node3:
address_ipv6: fd00:fd41:d0f1:1010::5
address_ipv4: 10.180.238.5
hostname: node3
driveripper:
address_ipv6: fd00:fd41:d0f1:1010::6
address_ipv4: 10.180.238.6
hostname: driveripper
unifi-external:
address_ipv6: fd00:fd41:d0f1:1010::7
address_ipv4: 10.180.238.7
hostname: unifi-external
nextcloud-aio:
address_ipv6: fd00:fd41:d0f1:1010::8
address_ipv4: 10.180.238.8
hostname: nextcloud-aio
pivpn:
address_ipv6: fd00:fd41:d0f1:1010::9
address_ipv4: 10.180.238.9
hostname: pivpn

View File

@@ -5,8 +5,8 @@ metadata:
namespace: kube-system
spec:
addresses:
- 2600:1700:1e6c:a81f:bee:bee:bee::/112
- 10.1.240.0-10.1.244.254
- 2603:6013:3140:104::4-2603:6013:3140:104:ffff:ffff:ffff:ffff
- 10.5.0.4-10.5.255.255
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
@@ -15,11 +15,15 @@ metadata:
namespace: kube-system
spec:
addresses:
- 2600:1700:1e6c:a81f:cafe:cafe:cafe::/112
- 10.1.245.0-10.1.250.254
- 2603:6013:3140:101::4-2603:6013:3140:101:ffff:ffff:ffff:ffff
- 10.4.0.4-10.4.255.255
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: external
name: l2advertisement
namespace: kube-system
spec:
ipAddressPools:
- external
- internal

View File

@@ -1,4 +1,32 @@
# Systemd Networkd
# Network Management
- [Network Management](#network-management)
- [IP Addresses](#ip-addresses)
- [Route53](#route53)
- [IPV6 EUI64 Address Generation](#ipv6-eui64-address-generation)
- [NetworkManager](#networkmanager)
## IP Addresses
| Hostname | IPV4 | IPV6 |
| -------- | ----------- | ------------------ |
| unifi | 192.168.2.1 | 2603:6013:3140:102 |
| lab | 10.1.0.1 | 2603:6013:3140:100 |
| iot | 10.2.0.1 | |
| home | 10.3.0.1 | 2603:6013:3140:103 |
| metallb | 10.5.0.1 | 2603:6013:3140:101 |
## Route53
```bash
aws route53 list-hosted-zones
# reeselink
aws route53 change-resource-record-sets --hosted-zone-id Z0092652G7L97DSINN18 --change-batch file://
# reeseapps
aws route53 change-resource-record-sets --hosted-zone-id Z012820733346FJ0U4FUF --change-batch file://
```
## IPV6 EUI64 Address Generation

View File

@@ -1,32 +0,0 @@
# Nginx Ansible Configuration
## Installation
Check vars.yaml to edit your servers.
```bash
# Run certbot first to ensure certs exist
ansible-playbook -i ansible/inventory.yaml nginx/certbot.yaml
ansible-playbook -i ansible/inventory.yaml nginx/nginx.yaml
```
## Restricted Addresses
We'll use nginx basic auth to protect our internal addresses
```bash
htpasswd -c secrets/.htpasswd ducoterra
```
## Certbot
Use `certbot delete` to remove unused certs.
## vars.yaml
`allowed_ips` restricts access to the endpoint (deny all) and then allows only the list
of ips provided.
## Logging
You can tail all the nginx logs with `tail -f /var/log/nginx/*`

View File

@@ -1,38 +0,0 @@
- name: Update certbot certs
hosts: colors
serial: 1
become: true
become_user: root
become_method: sudo
vars_files:
- vars.yaml
tasks:
- name: Ensure nginx, certbot, and nginx-mod-stream are installed
ansible.builtin.dnf:
name:
- certbot
state: present
- name: Get certs for all external domains
ansible.builtin.shell: /usr/bin/certbot certonly --dns-route53 -d '{{ item.external.domain }}{{ expose_tld }}' -n
loop: "{{ http }}"
- name: Create certbot renew service
template:
src: service/certbot-renew.service
dest: /etc/systemd/system/certbot-renew.service
owner: root
group: root
mode: '0644'
- name: Create certbot renew timer
template:
src: service/certbot-renew.timer
dest: /etc/systemd/system/certbot-renew.timer
owner: root
group: root
mode: '0644'
- name: Reload certbot-renew timer service
ansible.builtin.systemd_service:
daemon_reload: true
enabled: true
state: restarted
name: certbot-renew.timer

View File

@@ -1,79 +0,0 @@
server {
access_log /var/log/nginx/nginx_https_access.log basic;
error_log /var/log/nginx/nginx_https_error.log warn;
{% if item.external.restricted %}
if ($external_addr) {
return 404;
}
{% endif %}
{% if item.external.password_protect is defined and item.external.password_protect is sameas true %}
auth_basic "Administrators Area";
auth_basic_user_file /etc/nginx/.htpasswd;
{% endif %}
http2 on;
gzip on;
gzip_vary on;
gzip_min_length 10240;
gzip_proxied expired no-cache no-store private auth;
gzip_types text/plain text/css text/xml text/javascript application/x-javascript application/xml;
gzip_disable "MSIE [1-6]\.";
listen unix:/var/lib/nginx/tmp/nginx_http.sock proxy_protocol;
listen unix:/var/lib/nginx/tmp/nginx_https.sock ssl proxy_protocol;
if ($scheme = "http") {
return 301 https://$host:443$request_uri;
}
set_real_ip_from unix:;
real_ip_header X-Real-IP;
real_ip_recursive on;
server_name {{ item.external.domain }}{{ expose_tld }};
location / {
proxy_pass {{ item.internal.protocol }}://{{ item.internal.ip }}:{{ item.internal.port }}$request_uri;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Accept-Encoding "";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $proxy_protocol_addr;
proxy_set_header X-Forwarded-For $proxy_protocol_addr;
client_body_buffer_size 512k;
proxy_read_timeout 86400s;
client_max_body_size 0;
# Websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
ssl_certificate /etc/letsencrypt/live/{{ item.external.domain }}{{ expose_tld }}/fullchain.pem; # managed by certbot on host machine
ssl_certificate_key /etc/letsencrypt/live/{{ item.external.domain }}{{ expose_tld }}/privkey.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers EECDH+CHACHA20:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
ssl_trusted_certificate /etc/letsencrypt/live/{{ item.external.domain }}{{ expose_tld }}/fullchain.pem;
# replace with the IP address of your resolver
resolver 127.0.0.1;
}

View File

@@ -1,138 +0,0 @@
{%- set unique_ports = [] %}
{%- for item in http %}
{#- Gather http ports #}
{%- for port in item.external.extra_http_ports %}
{%- if port not in unique_ports %}
{{- unique_ports.append(port) }}
{%- endif %}
{%- endfor %}
{#- Gather https ports #}
{%- for port in item.external.extra_https_ports %}
{%- if port not in unique_ports %}
{{- unique_ports.append(port) }}
{%- endif %}
{%- endfor %}
{%- endfor %}
load_module /usr/lib64/nginx/modules/ngx_stream_module.so;
worker_processes auto;
events {
worker_connections 1024;
}
stream {
log_format basic '| Remote Addr: $remote_addr:$server_port | SSL Preread: $ssl_preread_server_name | Forward: $map_forward$upstream_port | Upstream Addr: $upstream_addr | $time_local | $protocol | $status | $bytes_sent | $bytes_received | $session_time |';
include /etc/nginx/stream.d/*.conf;
# Map all SSL parsed server names to hosts
map $ssl_preread_server_name $map_forward {
# Empty ssl preread gets forwarded to internal http server
"" "unix:/var/lib/nginx/tmp/nginx_http.sock";
# These domains will get forwarded to the internal https server
{% for item in http %}
{{ item.external.domain }}{{ expose_tld }} unix:/var/lib/nginx/tmp/nginx_https.sock;
{% endfor %}
{% for item in forward %}
{{ item.domain }}{{ expose_tld }} {{ item.ip }};
{% endfor %}
# By default forward to our internal nginx server (probably kubernetes)
default {{ defaults.forward_ip }};
}
# Since traffic being forwarded to the unix socket doesn't need a port we'll create
# a map here to enforce that case.
map $map_forward $upstream_port {
{{ defaults.forward_ip }} ":443";
"unix:/var/lib/nginx/tmp/nginx_http.sock" "";
"unix:/var/lib/nginx/tmp/nginx_https.sock" "";
default ":$server_port";
}
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
proxy_protocol on;
# The default http ports
{% for port in defaults.listen_ports %}
listen {{ port }};
listen [::]:{{ port }};
{% endfor %}
# Any unique ports listed in the extra_ports field
{% for port in unique_ports %}
listen {{ port }};
listen [::]:{{ port }};
{% endfor %}
proxy_pass $map_forward$upstream_port;
ssl_preread on;
proxy_socket_keepalive on;
}
}
http {
log_format basic '| Proxy Proto Addr: $proxy_protocol_addr | Internal: $external_addr | Remote Addr: $remote_addr:$server_port | Host: $host | Forward: $map_forward$server_port | Referer: $http_referer | $request | $time_local | $status |';
map $host $map_forward {
"" "unix:/var/lib/nginx/tmp/nginx_http.sock";
# We don't want to forward traffic we're terminating
# Rather we'll catch it here and redirect to 443.
{% for item in http %}
{{ item.external.domain }}{{ expose_tld }} "unix:/var/lib/nginx/tmp/nginx_https.sock";
{% endfor %}
default {{ defaults.forward_ip }};
}
# Handle internal http requests through unix:/var/lib/nginx/tmp/nginx_http.sock
server {
access_log /var/log/nginx/nginx_http_access.log basic;
error_log /var/log/nginx/nginx_http_error.log warn;
listen unix:/var/lib/nginx/tmp/nginx_http.sock default_server proxy_protocol;
location / {
# If we have an external forward IP, forward traffic
if ($map_forward != "unix:/var/lib/nginx/tmp/nginx_http.sock") {
proxy_pass $map_forward:80;
}
# Else redirect if the scheme is http
if ($scheme = "http") {
return 301 https://$host:443$request_uri;
}
}
}
map $proxy_protocol_addr $external_addr {
default 1;
{% for ip in internal_ipv4_regex %}
~{{ ip }} 0;
{% endfor %}
{% for ip in internal_ipv6_regex %}
~{{ ip }} 0;
{% endfor %}
}
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
include /etc/nginx/http.d/*.conf;
}

View File

@@ -1,82 +0,0 @@
- name: Update nginx stream configuration
hosts: colors
become: true
become_user: root
become_method: sudo
vars_files:
- vars.yaml
tasks:
- name: Ensure nginx, certbot, and nginx-mod-stream are installed
ansible.builtin.dnf:
name:
- nginx
- nginx-mod-stream
state: present
- name: Remove http.d dir before repopulating
file:
path: /etc/nginx/http.d/
state: absent
- name: Remove stream.d dir before repopulating
file:
path: /etc/nginx/stream.d/
state: absent
- name: Create stream.d dir
ansible.builtin.file:
path: /etc/nginx/stream.d
state: directory
mode: '0755'
- name: Create http.d dir
ansible.builtin.file:
path: /etc/nginx/http.d
state: directory
mode: '0755'
- name: Copy nginx.conf
template:
src: nginx.conf
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0644'
- name: Copy .htpasswd
template:
src: ../secrets/.htpasswd
dest: /etc/nginx/.htpasswd
owner: nginx
group: nginx
mode: '0600'
- name: Copy stream configurations
template:
src: "{{ item }}"
dest: /etc/nginx/stream.d/{{ item | basename }}
owner: root
group: root
mode: '0644'
with_fileglob:
- stream.d/*
- name: Template all http configurations
template:
src: https.conf
dest: /etc/nginx/http.d/{{ item.external.domain }}.conf
owner: root
group: root
mode: '0644'
loop: "{{ http }}"
- name: Test nginx configuration
ansible.builtin.shell: /usr/sbin/nginx -t
- name: Stop nginx service
ansible.builtin.systemd_service:
state: stopped
name: nginx
- name: Remove old socket files
file:
path: /var/lib/nginx/tmp/nginx_http.sock
state: absent
- name: Remove old socket files
file:
path: /var/lib/nginx/tmp/nginx_https.sock
state: absent
- name: Reload nginx service
ansible.builtin.systemd_service:
state: started
name: nginx
enabled: true

View File

@@ -1,6 +0,0 @@
[Unit]
Description=Certbot Renewal
[Service]
Type=oneshot
ExecStart=/usr/bin/certbot renew --dns-route53 -n

View File

@@ -1,9 +0,0 @@
[Unit]
Description=Timer for Certbot Renewal
[Timer]
OnBootSec=300
OnUnitActiveSec=1w
[Install]
WantedBy=multi-user.target

View File

@@ -1,8 +0,0 @@
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen 2222;
listen [::]:2222;
proxy_pass 10.1.2.100:2222;
}

View File

@@ -1,11 +0,0 @@
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen 5201;
listen [::]:5201;
listen 5201 udp;
listen [::]:5201 udp;
proxy_pass 127.0.0.1:5202;
}

View File

@@ -1,12 +0,0 @@
upstream kube_backend {
server 10.1.2.13:6443 max_fails=2 fail_timeout=30s;
}
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen 6443;
listen [::]:6443;
proxy_pass kube_backend;
}

View File

@@ -1,9 +0,0 @@
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen 25565-25575;
listen [::]:25565-25575;
proxy_pass 10.1.2.100:$server_port;
}

View File

@@ -1,11 +0,0 @@
server {
access_log /var/log/nginx/nginx_stream_access.log basic;
error_log /var/log/nginx/nginx_stream_error.log warn;
listen 3478;
listen 3478 udp;
listen [::]:3478;
listen [::]:3478 udp;
proxy_pass 10.1.175.237:3478;
}

View File

@@ -1,175 +0,0 @@
defaults:
forward_ip: "10.1.2.101"
dns_ip: "10.1.2.102"
listen_ports:
- 443
- 80
http:
internal_http_port: 80
internal_https_port: 443
internal_ipv4_regex:
- "10.1.*"
internal_ipv6_regex:
- "2600:1700:1e6c:a81f.*"
expose_tld: .reeseapps.com
forward:
- domain: nextcloud
ip: fd00:fd41:d0f1:1010::8
http:
- external:
domain: homeassistant
restricted: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.27.89"
port: 8123
protocol: https
- external:
domain: driveripper
restricted: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::6]"
port: 8443
protocol: https
- external:
domain: e3s1plus
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.1.224.78"
port: 80
protocol: http
- external:
domain: yellow
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::1]"
port: 9090
protocol: https
- external:
domain: node1
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::3]"
port: 9090
protocol: https
- external:
domain: node2
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::4]"
port: 9090
protocol: https
- external:
domain: node3
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::5]"
port: 9090
protocol: https
- external:
domain: unifi-external-cockpit
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::7]"
port: 9090
protocol: https
- external:
domain: nextcloud-aio-cockpit
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::8]"
port: 9090
protocol: https
# Printer
- external:
domain: cr10se
restricted: true
extra_http_ports: []
extra_https_ports:
# websocket
- 9999
# camera
- 8080
internal:
ip: "10.2.165.70"
port: 80
protocol: http
- external:
domain: pihole-yellow
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "10.180.238.1"
port: 8081
protocol: http
- external:
domain: attmodem
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: 192.168.1.254
port: 80
protocol: http
- external:
domain: nextcloud-aio
restricted: false
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::8]"
port: 11000
protocol: http
- external:
domain: unifi-external
restricted: false
extra_http_ports:
- 8080
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::7]"
port: 8443
protocol: https
- external:
domain: pivpn
restricted: true
extra_http_ports: []
extra_https_ports: []
internal:
ip: "[fd00:fd41:d0f1:1010::9]"
port: 9090
protocol: https

12
shelly/README.md Normal file
View File

@@ -0,0 +1,12 @@
# Shelly Devices
## Shelly Plug US
1. Connect to WiFi
2. Set password for AP
3. Disable AP
4. Set password for device authentication
5. Set Restore last known state of output/relay
6. Set Device Name
7. Enable Bluetooth Gateway
8. Update Firmware

View File

@@ -18,26 +18,24 @@ On the operator:
```bash
export SSH_HOST=kube
ssh-keygen -t rsa -b 4096 -C ducoterra@"$SSH_HOST".reeselink.com -f ~/.ssh/id_"$SSH_HOST"_rsa
ssh-keygen -t rsa -b 4096 -C ducoterra@${SSH_HOST}.reeselink.com -f ~/.ssh/id_${SSH_HOST}_rsa
# Note: If you get "too many authentication failures" it's likely because you have too many private
# keys in your ~/.ssh directory. Use `-o PubkeyAuthentication` to fix it.
ssh-copy-id -o PubkeyAuthentication=no -i ~/.ssh/id_$SSH_HOST_rsa.pub ducoterra@"$SSH_HOST".reeselink.com
ssh-copy-id -o PubkeyAuthentication=no -i ~/.ssh/id_${SSH_HOST}_rsa.pub ducoterra@${SSH_HOST}.reeselink.com
cat <<EOF >> ~/.ssh/config
Host $SSH_HOST
Hostname $SSH_HOST.reeselink.com
Hostname ${SSH_HOST}.reeselink.com
User root
ProxyCommand none
ForwardAgent no
ForwardX11 no
Port 22
  TCPKeepAlive yes
IdentityFile ~/.ssh/id_"$SSH_HOST"_rsa
IdentityFile ~/.ssh/id_${SSH_HOST}_rsa
EOF
ssh -o PubkeyAuthentication=no ducoterra@"$SSH_HOST".reeselink.com
```
On the server:
@@ -52,13 +50,14 @@ passwd
sudo su -
echo "PasswordAuthentication no" > /etc/ssh/sshd_config.d/01-prohibit-password.conf
echo '%sudo ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/01-nopasswd-sudo
systemctl restart sshd
```
On the operator:
```bash
# Test if you can SSH with a password
ssh -o PubkeyAuthentication=no ducoterra@"$SSH_HOST".reeselink.com
ssh -o PubkeyAuthentication=no ducoterra@${SSH_HOST}.reeselink.com
# Test that you can log into the server with ssh config
ssh $SSH_HOST