add bitnami wordpress example

2023-11-06 08:42:51 -05:00
parent 658dfd8e3f
commit 48afc0bfa2
2 changed files with 69 additions and 0 deletions


@@ -23,6 +23,7 @@ A project to store container-based hosting stuff.
- [Snapdrop](#snapdrop)
- [Jellyfin](#jellyfin)
- [Iperf3](#iperf3)
- [WordPress](#wordpress)
- [Upgrading](#upgrading)
- [Nodes](#nodes)
- [K3S](#k3s)
@@ -647,6 +648,23 @@ helm upgrade --install \
--create-namespace
```
### WordPress
The Bitnami WordPress chart allows enough customization to
work on a custom K3S server. With a few tweaks it works quite
well. Use the values in `bitnami/wordpress.yaml` as a starting
point.
```bash
helm upgrade --install \
wordpress \
-f bitnami/wordpress.yaml \
--set wordpressUsername=admin \
--set wordpressPassword=password \
--set mariadb.auth.rootPassword=secretpassword \
oci://registry-1.docker.io/bitnamicharts/wordpress
```
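Once the release is up you can sanity-check it with standard Helm and kubectl commands. A quick sketch, assuming the release was installed into the current namespace under the default `wordpress` release name:
```bash
# Confirm the release deployed and show the chart's status notes
helm status wordpress

# The chart labels its resources with the release name, so this lists
# the WordPress pod, service, and ingress in one shot
kubectl get pods,svc,ingress -l app.kubernetes.io/instance=wordpress
```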
## Upgrading
### Nodes

bitnami/wordpress.yaml Normal file

@@ -0,0 +1,51 @@
service:
  # Don't use an external IP address
  type: ClusterIP
ingress:
  enabled: true
  pathType: Prefix
  # Change this
  hostname: wordpress.reeseapps.com
  annotations:
    # Get a cert from letsencrypt
    cert-manager.io/cluster-issuer: letsencrypt
    # Use the nginx ingress class
    kubernetes.io/ingress.class: nginx
    # Allow infinitely large uploads (change this)
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.org/client-max-body-size: "0"
  tls: true
resources:
  # Namespaces have strict "request" requirements
  requests:
    cpu: 100m
    memory: 128Mi
  # Limits are otherwise unbounded; allow some breathing room
  limits:
    cpu: 2
    memory: 1Gi
updateStrategy:
  # Since the default storage is a single-node mount, we can't
  # use the typical rolling update strategy: the new pod might
  # try to start on a node that doesn't have the storage
  # mounted. We get around this by tearing down the old pod
  # before spinning up the new one. This causes a short window
  # of downtime; changing the default storage would also fix it
  # (see the sketch after this file).
  type: Recreate
# These tolerations ensure that if one of the nodes goes down
# for some reason, the pods will jump to the next available
# node within 1 second of it becoming unreachable.
tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationSeconds: 1
  - key: "node.kubernetes.io/not-ready"
    operator: "Exists"
    effect: "NoExecute"
    tolerationSeconds: 1
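
The `updateStrategy` comment above notes that swapping the default storage would make rolling updates safe. A minimal sketch of that alternative, assuming the cluster offers a storage class with ReadWriteMany support (the `nfs-client` name below is a placeholder, not something this repo defines):
```yaml
persistence:
  # Assumption: "nfs-client" is a ReadWriteMany-capable storage class
  # available in the cluster; substitute whatever actually exists.
  storageClass: nfs-client
  accessModes:
    - ReadWriteMany
updateStrategy:
  # With shared storage the new pod can start on any node before the
  # old one is removed, so the Recreate strategy is no longer needed.
  type: RollingUpdate
```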