move to project lifecycle structure

This commit is contained in:
2024-07-21 02:20:48 -04:00
parent fd1fde499d
commit e6aff894e8
121 changed files with 6234 additions and 196 deletions

View File

@@ -0,0 +1,387 @@
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: coredns/coredns
# Overrides the image tag whose default is the chart appVersion.
tag: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
pullSecrets: []
# pullSecrets:
# - name: myRegistryKeySecretName
replicaCount: 1
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
terminationGracePeriodSeconds: 30
podAnnotations: {}
# cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
serviceType: "ClusterIP"
prometheus:
service:
enabled: false
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
selector: {}
monitor:
enabled: false
additionalLabels: {}
namespace: ""
interval: ""
selector: {}
service:
clusterIP: fd02:c91e:56f5::10
clusterIPs:
- fd02:c91e:56f5::10
- 10.43.0.10
# loadBalancerIP: ""
# externalIPs: []
# externalTrafficPolicy: ""
ipFamilyPolicy: "RequireDualStack"
# The name of the Service
# If not set, a name is generated using the fullname template
name: ""
annotations: {}
# Pod selector
selector: {}
serviceAccount:
create: false
# The name of the ServiceAccount to use
# If not set and create is true, a name is generated using the fullname template
name: ""
annotations: {}
rbac:
# If true, create & use RBAC resources
create: true
# If true, create and use PodSecurityPolicy
pspEnable: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
# name:
# isClusterService specifies whether the chart should be deployed as a cluster service or as a normal k8s app.
isClusterService: true
# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set.
priorityClassName: ""
# Configure the pod level securityContext.
podSecurityContext: {}
# Configure the SecurityContext for the Pod.
# Ensure that the Linux capability required to bind to port numbers below 1024 (`CAP_NET_BIND_SERVICE`) is assigned.
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
# Default zone is what Kubernetes recommends:
# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options
servers:
- zones:
- zone: .
port: 53
# If serviceType is NodePort, you can specify the nodePort here
# nodePort: 30053
# hostPort: 53
plugins:
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
configBlock: |-
lameduck 5s
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: cluster.local in-addr.arpa ip6.arpa
configBlock: |-
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . 2606:4700:4700::1111
- name: cache
parameters: 30
- name: loop
- name: reload
- name: loadbalance
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# Extra configuration that is applied outside of the default zone block.
# Example to include additional config files, which may come from extraVolumes:
# extraConfig:
# import:
# parameters: /opt/coredns/*.conf
extraConfig: {}
# To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
# To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core
# and supports Helm templating.
# For example:
# topologySpreadConstraints:
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
# app.kubernetes.io/instance: '{{ .Release.Name }}'
# topologyKey: topology.kubernetes.io/zone
# maxSkew: 1
# whenUnsatisfiable: ScheduleAnyway
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
# app.kubernetes.io/instance: '{{ .Release.Name }}'
# topologyKey: kubernetes.io/hostname
# maxSkew: 1
# whenUnsatisfiable: ScheduleAnyway
topologySpreadConstraints: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
podDisruptionBudget: {}
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of sidecar containers
extraContainers: []
# - name: some-container-name
# image: some-image:latest
# imagePullPolicy: Always
# optional array of extra volumes to create
extraVolumes: []
# - name: some-volume-name
# emptyDir: {}
# optional array of mount points for extraVolumes
extraVolumeMounts: []
# - name: some-volume-name
# mountPath: /etc/wherever
# optional array of secrets to mount inside coredns container
# possible use case: a secure connection to an etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# defaultMode: 420
# - name: some-fancy-secret
# mountPath: /etc/wherever
# defaultMode: 440
# To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors.
# See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources
# k8sAppLabelOverride: "kube-dns"
# Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
customLabels: {}
# Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
customAnnotations: {}
## Alternative configuration for HPA deployment if wanted
## Create HorizontalPodAutoscaler object.
##
# hpa:
# enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# metrics:
# - type: Resource
# resource:
# name: memory
# target:
# type: Utilization
# averageUtilization: 60
# - type: Resource
# resource:
# name: cpu
# target:
# type: Utilization
# averageUtilization: 60
hpa:
enabled: false
minReplicas: 1
maxReplicas: 2
metrics: []
## Configure a cluster-proportional-autoscaler for coredns
# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler
autoscaler:
# Enable the cluster-proportional-autoscaler
enabled: false
# Number of cores in the cluster per coredns replica
coresPerReplica: 256
# Number of nodes in the cluster per coredns replica
nodesPerReplica: 16
# Min size of replicaCount
min: 0
# Max size of replicaCount (default of 0 is no max)
max: 0
# Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler
includeUnschedulableNodes: false
# If true does not allow single points of failure to form
preventSinglePointFailure: true
# Annotations for the coredns proportional autoscaler pods
podAnnotations: {}
## Optionally specify some extra flags to pass to cluster-proportional-autoscaler.
## Useful for e.g. the nodelabels flag.
# customFlags:
# - --nodelabels=topology.kubernetes.io/zone=us-east-1a
image:
repository: registry.k8s.io/cpa/cluster-proportional-autoscaler
tag: "1.8.5"
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
pullSecrets: []
# pullSecrets:
# - name: myRegistryKeySecretName
# Optional priority class to be used for the autoscaler pods. The top-level priorityClassName is used if not set.
priorityClassName: ""
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
tolerations: []
# resources for autoscaler pod
resources:
requests:
cpu: "20m"
memory: "10Mi"
limits:
cpu: "20m"
memory: "10Mi"
# Options for autoscaler configmap
configmap:
## Annotations for the coredns-autoscaler configmap
# e.g. strategy.spinnaker.io/versioned: "false" to ensure the configmap isn't renamed
annotations: {}
# Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
# optional array of sidecar containers
extraContainers: []
# - name: some-container-name
# image: some-image:latest
# imagePullPolicy: Always
deployment:
skipConfig: false
enabled: true
name: ""
## Annotations for the coredns deployment
annotations: {}
## Pod selector
selector: {}

View File

@@ -0,0 +1,80 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: external-dns
labels:
app.kubernetes.io/name: external-dns
rules:
- apiGroups: [""]
resources: ["services","endpoints","pods","nodes"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions","networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
labels:
app.kubernetes.io/name: external-dns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: kube-system # change to desired namespace: externaldns, kube-addons
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: kube-system
labels:
app.kubernetes.io/name: external-dns
spec:
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: external-dns
template:
metadata:
labels:
app.kubernetes.io/name: external-dns
spec:
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.k8s.io/external-dns/external-dns:v0.14.2
args:
- --source=service
- --source=ingress
- --domain-filter=reeseapps.com
- --provider=aws
- --aws-zone-type=public
- --registry=txt
- --txt-owner-id=external-dns
env:
- name: AWS_DEFAULT_REGION
value: us-east-1 # change to region where EKS is installed
- name: AWS_SHARED_CREDENTIALS_FILE
value: /.aws/externaldns-credentials
volumeMounts:
- name: aws-credentials
mountPath: /.aws
readOnly: true
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
volumes:
- name: aws-credentials
secret:
secretName: external-dns

View File

@@ -0,0 +1,8 @@
# Comment out this ServiceAccount if it was previously created
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: kube-system
labels:
app.kubernetes.io/name: external-dns

View File

@@ -0,0 +1,75 @@
image:
tag: 1.21.4
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: letsencrypt
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
hosts:
- host: gitea-staging.reeseapps.com
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- gitea-staging.reeseapps.com
secretName: gitea-staging-tls-cert
persistence:
enabled: true
create: true
storageClass: zfs-iscsi-enc0
claimName: data-gitea-staging-0
annotations:
"helm.sh/resource-policy": keep
global:
storageClass: zfs-iscsi-enc1
postgresql:
enabled: true
image:
tag: 15
primary:
persistence:
enabled: true
storageClass: zfs-iscsi-enc1
annotations:
"helm.sh/resource-policy": keep
postgresql-ha:
enabled: false
gitea:
admin:
existingSecret: gitea-admin-secret
config:
service:
DISABLE_REGISTRATION: true
service:
ssh:
port: 22
type: ClusterIP
redis-cluster:
enabled: true
image:
tag: 7.2
deployment:
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
strategy:
type: Recreate

View File

@@ -0,0 +1,86 @@
image:
tag: 1.22
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
apiVersion: networking.k8s.io/v1
nginx.ingress.kubernetes.io/stream-snippet: |
server {
listen 22;
proxy_pass [::]:22;
}
hosts:
- host: gitea.reeseapps.com
paths:
- path: /
pathType: Prefix
tls:
- hosts:
- gitea.reeseapps.com
secretName: gitea-tls-cert
persistence:
enabled: true
create: true
claimName: data-gitea-0
annotations:
"helm.sh/resource-policy": keep
postgresql:
enabled: true
image:
tag: 15
primary:
persistence:
enabled: true
annotations:
"helm.sh/resource-policy": keep
postgresql-ha:
enabled: false
gitea:
admin:
existingSecret: gitea-admin-secret
config:
service:
DISABLE_REGISTRATION: true
server:
SSH_PORT: 22
SSH_DOMAIN: git.reeseapps.com
service:
ssh:
port: 22
type: LoadBalancer
externalTrafficPolicy: Local
ipFamilyPolicy: SingleStack
ipFamilies: ["IPv6"]
annotations:
metallb.universe.tf/address-pool: "external"
external-dns.alpha.kubernetes.io/hostname: git.reeseapps.com
redis-cluster:
enabled: false
image:
tag: 7.2
deployment:
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
strategy:
type: Recreate

View File

@@ -0,0 +1,13 @@
controller:
service:
externalTrafficPolicy: Local
annotations:
metallb.universe.tf/address-pool: "external"
metallb.universe.tf/allow-shared-ip: nginx
external-dns.alpha.kubernetes.io/hostname: ingress-nginx.reeseapps.com
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv6
config:
log-format-upstream: '| Remote Addr: $remote_addr:$server_port | Host: $host | Referer: $http_referer | $request | $time_local | $status |'
allowSnippetAnnotations: true

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: Iperf3
description: A Simple Iperf3 Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,27 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: iperf
template:
metadata:
labels:
app.kubernetes.io/name: iperf
spec:
containers:
- name: iperf
image: networkstatic/iperf3
args: ["-s"]
ports:
- containerPort: 5201
name: iperf
resources:
requests:
memory: "256Mi"
cpu: "1m"
limits:
memory: "4Gi"
cpu: "4"

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
annotations:
metallb.universe.tf/address-pool: "internal"
external-dns.alpha.kubernetes.io/hostname: {{ .Release.Name }}.reeseapps.com
spec:
type: LoadBalancer
ipFamilies: ["IPv6"]
selector:
app.kubernetes.io/name: iperf
ports:
- name: iperf
protocol: TCP
port: 5201
targetPort: iperf

View File

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: Jellyfin
description: A Simple Jellyfin Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: jellyfin
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: jellyfin
spec:
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
containers:
- name: jellyfin
image: {{ .Values.jellyfin.image }}
ports:
- containerPort: 8096
name: http
volumeMounts:
- mountPath: /config
name: config
- mountPath: /cache
name: cache
- mountPath: /movies
name: movies
- mountPath: /shows
name: shows
- mountPath: /videos
name: videos
resources:
requests:
memory: "1Gi"
cpu: "1m"
limits:
memory: "8Gi"
cpu: "24"
volumes:
- name: config
persistentVolumeClaim:
claimName: {{ .Release.Name }}-config
- name: cache
persistentVolumeClaim:
claimName: {{ .Release.Name }}-cache
- name: movies
nfs:
server: driveripper.reeselink.com
path: /mnt/enc0/media/Movies
readOnly: true
- name: shows
nfs:
server: driveripper.reeselink.com
path: /mnt/enc0/media/Shows
readOnly: true
- name: videos
nfs:
server: driveripper.reeselink.com
path: /mnt/enc0/media/Videos
readOnly: true

View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.jellyfin.domain }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin
port:
name: http
tls:
- hosts:
- {{ .Values.jellyfin.domain }}
secretName: jellyfin-tls-cert

View File

@@ -0,0 +1,27 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-config
annotations:
"helm.sh/resource-policy": keep
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-cache
annotations:
"helm.sh/resource-policy": keep
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 16Gi

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
ipFamilyPolicy: PreferDualStack
ipFamilies:
- IPv6
- IPv4
type: ClusterIP
selector:
app.kubernetes.io/name: jellyfin
ports:
- name: http
protocol: TCP
port: 80
targetPort: http

View File

@@ -0,0 +1,3 @@
jellyfin:
image: jellyfin/jellyfin:10
domain: jellyfin.reeseapps.com

View File

@@ -0,0 +1,13 @@
- op: replace # action
path: /data/config.json # resource we want to change
value: |-
{
"storageClassConfigs": {
"ssd": {
"sharedFileSystemPath": "/opt/local-path-provisioner/ssd"
},
"hdd": {
"sharedFileSystemPath": "/opt/local-path-provisioner/hdd"
}
}
}

View File

@@ -0,0 +1,3 @@
- op: replace # action
path: /metadata/name # resource we want to change
value: hdd # value we want to use for patching

View File

@@ -0,0 +1,19 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- local-path-storage.yaml
- ssd-storage.yaml
patches:
- target:
group: storage.k8s.io
version: v1
kind: StorageClass
name: local-path
path: StorageClass-hdd-patch.yaml
- target:
group: ""
version: v1
kind: ConfigMap
path: ConfigMap-patch.yaml
- target: {}
path: namespace-patch.yaml

View File

@@ -0,0 +1,189 @@
apiVersion: v1
kind: Namespace
metadata:
name: local-path-storage
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local-path
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: local-path-provisioner-role
namespace: local-path-storage
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- create
- patch
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: local-path-provisioner-role
rules:
- apiGroups:
- ""
resources:
- nodes
- persistentvolumeclaims
- configmaps
- pods
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- patch
- update
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: local-path-provisioner-bind
namespace: local-path-storage
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: local-path-provisioner-bind
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
name: local-path-provisioner-service-account
namespace: local-path-storage
---
apiVersion: v1
data:
config.json: |-
{
"nodePathMap":[
{
"node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
"paths":["/opt/local-path-provisioner"]
}
]
}
helperPod.yaml: |-
apiVersion: v1
kind: Pod
metadata:
name: helper-pod
spec:
priorityClassName: system-node-critical
tolerations:
- key: node.kubernetes.io/disk-pressure
operator: Exists
effect: NoSchedule
containers:
- name: helper-pod
image: busybox
imagePullPolicy: IfNotPresent
setup: |-
#!/bin/sh
set -eu
mkdir -m 0777 -p "$VOL_DIR"
teardown: |-
#!/bin/sh
set -eu
rm -rf "$VOL_DIR"
kind: ConfigMap
metadata:
name: local-path-config
namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: local-path-provisioner
namespace: local-path-storage
spec:
replicas: 1
selector:
matchLabels:
app: local-path-provisioner
template:
metadata:
labels:
app: local-path-provisioner
spec:
containers:
- command:
- local-path-provisioner
- --debug
- start
- --config
- /etc/config/config.json
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CONFIG_MOUNT_PATH
value: /etc/config/
image: rancher/local-path-provisioner:v0.0.28
imagePullPolicy: IfNotPresent
name: local-path-provisioner
volumeMounts:
- mountPath: /etc/config/
name: config-volume
serviceAccountName: local-path-provisioner-service-account
volumes:
- configMap:
name: local-path-config
name: config-volume

View File

@@ -0,0 +1,3 @@
- op: replace # action
path: /metadata/namespace # resource we want to change
value: kube-system

View File

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ssd
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer

View File

@@ -0,0 +1,29 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: external
namespace: kube-system
spec:
addresses:
- 2603:6013:3140:104::4-2603:6013:3140:104:ffff:ffff:ffff:ffff
- 10.5.0.4-10.5.255.255
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: internal
namespace: kube-system
spec:
addresses:
- 2603:6013:3140:101::4-2603:6013:3140:101:ffff:ffff:ffff:ffff
- 10.4.0.4-10.4.255.255
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: l2advertisement
namespace: kube-system
spec:
ipAddressPools:
- external
- internal

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: Minecraft
description: A Simple Minecraft Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}
data:
eula.txt: |
eula=true
server.properties: {{ toYaml .Values.server_props | indent 2 }}

View File

@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
selector:
matchLabels:
app: {{ .Release.Name }}
strategy:
type: Recreate
template:
metadata:
labels:
app: {{ .Release.Name }}
spec:
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
initContainers:
- name: get-version
image: {{ .Values.get_server.image }}
imagePullPolicy: Always
env:
- name: SERVER_VERSION
value: {{ .Values.server_version | quote }}
volumeMounts:
- mountPath: /downloads
name: data
containers:
- name: {{ .Release.Name }}
image: {{ .Values.image }}
imagePullPolicy: Always
ports:
- containerPort: 25565
volumeMounts:
- mountPath: /mc_data
name: data
- name: properties
mountPath: /mc_data/server.properties
subPath: server.properties
- name: properties
mountPath: /mc_data/eula.txt
subPath: eula.txt
tty: true
stdin: true
env:
- name: MAX_RAM
value: {{ .Values.max_ram | quote }}
- name: MIN_RAM
value: "1"
resources:
requests:
memory: {{ div .Values.max_ram 2 }}Gi
cpu: 1m
limits:
memory: {{ add 1 .Values.max_ram }}Gi
cpu: {{ .Values.max_cpu | quote }}
volumes:
- name: data
persistentVolumeClaim:
claimName: {{ .Release.Name }}
- name: properties
configMap:
name: {{ .Release.Name }}
securityContext:
fsGroup: 2000

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}
annotations:
"helm.sh/resource-policy": keep
spec:
storageClassName: ssd
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 32Gi

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
annotations:
metallb.universe.tf/address-pool: "external"
external-dns.alpha.kubernetes.io/hostname: {{ .Release.Name }}.reeseapps.com
spec:
ipFamilies: ["IPv6"]
externalTrafficPolicy: Cluster
selector:
app: {{ .Release.Name }}
ports:
- port: {{ .Values.port }}
targetPort: 25565
name: {{ .Release.Name }}
type: LoadBalancer

View File

@@ -0,0 +1,65 @@
image: ducoterra/minecraft:1.0.4
get_server:
image: ducoterra/get-minecraft:2.0.2
server_version: "1.21"
port: 25565
max_cpu: 4
max_ram: 8
server_props: |
enable-jmx-monitoring=false
rcon.port=25575
level-seed=
gamemode=survival
enable-command-block=false
enable-query=false
generator-settings={}
enforce-secure-profile=true
level-name=world
motd=A Minecraft Server
query.port=25565
pvp=true
generate-structures=true
max-chained-neighbor-updates=1000000
difficulty=easy
network-compression-threshold=256
max-tick-time=600000
require-resource-pack=false
use-native-transport=true
max-players=20
online-mode=true
enable-status=true
allow-flight=false
initial-disabled-packs=
broadcast-rcon-to-ops=true
view-distance=10
server-ip=
resource-pack-prompt=
allow-nether=true
server-port=25565
enable-rcon=false
sync-chunk-writes=true
op-permission-level=4
prevent-proxy-connections=false
hide-online-players=false
resource-pack=
entity-broadcast-range-percentage=100
simulation-distance=10
rcon.password=
player-idle-timeout=0
force-gamemode=false
rate-limit=0
hardcore=false
white-list=true
broadcast-console-to-ops=true
spawn-npcs=true
spawn-animals=true
log-ips=true
function-permission-level=2
initial-enabled-packs=vanilla
level-type=minecraft\:normal
text-filtering-config=
spawn-monsters=true
enforce-whitelist=true
spawn-protection=16
resource-pack-sha1=
max-world-size=29999984

View File

@@ -0,0 +1,5 @@
FROM nginx
COPY blocklists.txt /usr/share/nginx/html
# Concatenate every list referenced in blocklists.txt into a single hosts file served by nginx
RUN for url in $(cat /usr/share/nginx/html/blocklists.txt);\
do echo >> /usr/share/nginx/html/hosts && curl $url >> /usr/share/nginx/html/hosts;\
done

View File

@@ -0,0 +1,29 @@
# Pihole
See `podman` for the Pi-hole installation itself; this is just the blocklist.
## Blocklists
Add this to your pihole adlists:
<https://blocklist.reeseapps.com/hosts>
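A quick way to confirm the list is being served before pointing Pi-hole at it (a sketch; any HTTP client works):
```bash
# fetch the first few entries of the aggregated hosts file
curl -s https://blocklist.reeseapps.com/hosts | head
```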
## Updating blocklist.reeseapps.com
Add lists to blocklists.txt and build the project.
Blocklists are baked into an nginx image, pushed to the registry, and deployed with helm:
```bash
podman-compose build
podman-compose push
helm upgrade --install \
--namespace pihole \
--create-namespace \
blocklist ./pihole_blocklist/helm
```
## Notes
<https://v.firebog.net/hosts/lists.php>

View File

@@ -0,0 +1,48 @@
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts
https://v.firebog.net/hosts/static/w3kbl.txt
https://raw.githubusercontent.com/matomo-org/referrer-spam-blacklist/master/spammers.txt
https://someonewhocares.org/hosts/zero/hosts
https://raw.githubusercontent.com/VeleSila/yhosts/master/hosts
https://winhelp2002.mvps.org/hosts.txt
https://v.firebog.net/hosts/neohostsbasic.txt
https://raw.githubusercontent.com/RooneyMcNibNug/pihole-stuff/master/SNAFU.txt
https://paulgb.github.io/BarbBlock/blacklists/hosts-file.txt
https://adaway.org/hosts.txt
https://v.firebog.net/hosts/AdguardDNS.txt
https://v.firebog.net/hosts/Admiral.txt
https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
https://v.firebog.net/hosts/Easylist.txt
https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/UncheckyAds/hosts
https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts
https://raw.githubusercontent.com/jdlingyu/ad-wars/master/hosts
https://v.firebog.net/hosts/Easyprivacy.txt
https://v.firebog.net/hosts/Prigent-Ads.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts
https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt
https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt
https://www.github.developerdan.com/hosts/lists/ads-and-tracking-extended.txt
https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/android-tracking.txt
https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/SmartTV.txt
https://raw.githubusercontent.com/Perflyst/PiHoleBlocklist/master/AmazonFireTV.txt
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-blocklist.txt
https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt
https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt
https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt
https://v.firebog.net/hosts/Prigent-Crypto.txt
https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts
https://bitbucket.org/ethanr/dns-blacklists/raw/8575c9f96e5b4a1308f2f12394abd86d0927a4a0/bad_lists/Mandiant_APT1_Report_Appendix_D.txt
https://phishing.army/download/phishing_army_blocklist_extended.txt
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
https://v.firebog.net/hosts/RPiList-Malware.txt
https://v.firebog.net/hosts/RPiList-Phishing.txt
https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt
https://raw.githubusercontent.com/AssoEchap/stalkerware-indicators/master/generated/hosts
https://urlhaus.abuse.ch/downloads/hostfile/
https://malware-filter.gitlab.io/malware-filter/phishing-filter-hosts.txt
https://v.firebog.net/hosts/Prigent-Malware.txt
https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser
https://blocklistproject.github.io/Lists/everything.txt

View File

@@ -0,0 +1,8 @@
version: '3'
services:
repo:
image: ducoterra/blocklist:0.0.2
build: .
ports:
- 8080:80

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: blocklist
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,73 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: {{ .Release.Name }}
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: {{ .Release.Name }}
spec:
containers:
- name: nginx
image: {{ .Values.image }}
imagePullPolicy: Always
ports:
- containerPort: 80
name: http
resources:
requests:
memory: "1Gi"
cpu: "1m"
limits:
memory: "1Gi"
cpu: "1"
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: {{ .Release.Name }}
ports:
- name: http
protocol: TCP
port: 80
targetPort: http
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.domain }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ .Release.Name }}
port:
name: http
tls:
- hosts:
- {{ .Values.domain }}
secretName: {{ .Release.Name }}-tls-cert

View File

@@ -0,0 +1,2 @@
image: ducoterra/blocklist:0.0.2
domain: blocklist.reeseapps.com

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: Snapdrop
description: A Simple Snapdrop Chart
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-snapdrop
annotations:
"helm.sh/resource-policy": keep
data:
PUID: "1000"
PGID: "1000"
TZ: Etc/UTC

View File

@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
selector:
matchLabels:
app.kubernetes.io/name: snapdrop
strategy:
type: Recreate
template:
metadata:
labels:
app.kubernetes.io/name: snapdrop
spec:
containers:
- name: snapdrop
image: {{ .Values.snapdrop.image }}
ports:
- containerPort: 80
name: http
envFrom:
- configMapRef:
name: {{ .Release.Name }}-snapdrop
volumeMounts:
- mountPath: /config
name: config
resources:
requests:
memory: "1Gi"
cpu: "1m"
limits:
memory: "4Gi"
cpu: "4"
volumes:
- name: config
persistentVolumeClaim:
claimName: {{ .Release.Name }}-config

View File

@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ .Release.Name }}
annotations:
cert-manager.io/cluster-issuer: letsencrypt
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
spec:
ingressClassName: nginx
rules:
- host: {{ .Values.snapdrop.domain }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: snapdrop
port:
name: http
tls:
- hosts:
- {{ .Values.snapdrop.domain }}
secretName: snapdrop-tls-cert

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Release.Name }}-config
annotations:
"helm.sh/resource-policy": keep
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: snapdrop
ports:
- name: http
protocol: TCP
port: 80
targetPort: http

View File

@@ -0,0 +1,3 @@
snapdrop:
image: linuxserver/snapdrop:latest
domain: snapdrop.reeseapps.com

View File

@@ -0,0 +1,29 @@
# Grafana
## Helm
```bash
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update
helm upgrade --install my-grafana grafana/grafana \
--namespace monitoring \
--create-namespace \
--values grafana/values.yaml
kubectl get secret --namespace monitoring my-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
```
`metrics-server.kube-system.svc.cluster.local:9090`
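To log in and add that address as a datasource, a port-forward to the release is enough (a sketch; it assumes the `my-grafana` release installed above and the chart's default service port of 80):
```bash
kubectl --namespace monitoring port-forward svc/my-grafana 3000:80
# then browse to http://localhost:3000 and sign in with the admin password printed above
```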
## Kube Prometheus Stack
```bash
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm upgrade --install \
kube-prometheus-stack \
prometheus-community/kube-prometheus-stack \
--namespace kube-system \
--values grafana/helm-prom-stack-values.yaml
```
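To confirm Prometheus is up and scraping after the install, a port-forward works as a quick check (a sketch; the service name below is the chart's usual default for this release name and may differ in your cluster):
```bash
kubectl --namespace kube-system port-forward svc/kube-prometheus-stack-prometheus 9090:9090
# then check http://localhost:9090/targets for healthy targets
```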

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: namespace
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,210 @@
# Project Userspace
## One provisioner to rule them all
### Quickstart
```bash
./userspace/scripts/setup.sh
./userspace/scripts/upsertuser.sh <server_fqdn> <username>
./userspace/scripts/removeuser.sh <server_fqdn> <username>
```
### Update a user
```bash
export USER=user
helm template $USER ./namespace | kubectl --context admin apply -f -
```
### Objectives
1. Provision a namespace with clusterroles, rolebindings, and a dedicated nfs-provisioner with one helm chart
2. Create an easy way for users to sign their certificates
3. Create a cleanup script without deleting user data
4. profit
### Userspace
#### Namespace
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Name }}
```
#### Roles
```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: namespace-manager
namespace: {{ .Release.Name }}
rules:
- apiGroups:
- ""
- extensions
- apps
- batch
- autoscaling
- networking.k8s.io
- traefik.containo.us
- rbac.authorization.k8s.io
- metrics.k8s.io
resources:
- deployments
- replicasets
- pods
- pods/exec
- pods/log
- pods/attach
- daemonsets
- statefulsets
- replicationcontrollers
- horizontalpodautoscalers
- services
- ingresses
- persistentvolumeclaims
- jobs
- cronjobs
- secrets
- configmaps
- serviceaccounts
- rolebindings
- ingressroutes
- middlewares
- endpoints
verbs:
- "*"
- apiGroups:
- ""
- metrics.k8s.io
- rbac.authorization.k8s.io
resources:
- resourcequotas
- roles
verbs:
- list
```
#### Rolebinding
```yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: {{ .Release.Name }}
name: namespace-manager
subjects:
- kind: User
name: {{ .Release.Name }}
apiGroup: ""
roleRef:
kind: ClusterRole
name: namespace-manager
apiGroup: ""
```
### Create a kubernetes certsigner pod
This keeps the client-ca cert and key secret and allows user certs to be signed and stored on the pod
#### Create the certsigner secret
```bash
kubectl -n kube-system create secret generic certsigner --from-file /var/lib/rancher/k3s/server/tls/client-ca.crt --from-file /var/lib/rancher/k3s/server/tls/client-ca.key
```
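A quick sanity check that both CA files landed in the secret (a sketch; it assumes the same `admin` context used below):
```bash
kubectl --context admin -n kube-system describe secret certsigner
# should list client-ca.crt and client-ca.key with non-zero sizes
```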
#### Set up the certsigner pod
```bash
kubectl --context admin apply -f certsigner
```
#### Generate a cert
```bash
export USER=<user>
docker run -it -v $(pwd)/users/$USER:/$USER python:latest openssl genrsa -out /$USER/$USER.key 2048
docker run -it -v $(pwd)/users/$USER:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=user"
```
#### Create a new Userspace
```bash
helm template $USER ./namespace | kubectl --context admin apply -f -
```
#### Sign the cert
```bash
export USER=<user>
kubectl --context admin cp $(pwd)/users/$USER/$USER.csr certsigner:/certs/$USER.csr
kubectl --context admin exec -it certsigner -- openssl x509 -in /certs/$USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -CAcreateserial -out /certs/$USER.crt -days 5000
kubectl --context admin cp certsigner:/certs/$USER.crt $(pwd)/users/$USER/$USER.crt
```
#### Add to the config
```bash
kubectl config set-credentials $USER --client-certificate=$USER.crt --client-key=$USER.key
kubectl config set-context $USER --cluster=mainframe --namespace=$USER --user=$USER
```
#### Delete
```bash
kubectl config delete-context $USER
helm template $USER ./namespace | kubectl --context admin delete -f -
```
### Signing a user cert - detailed notes
NOTE: ca.crt and ca.key are in /var/lib/rancher/k3s/server/tls/client-ca.*
```bash
# First we create the credentials
# /CN=<username> - the user
# /O=<group> - the group
# Navigate to the user directory
export USER=<username>
cd $USER
# Generate a private key
openssl genrsa -out $USER.key 2048
# Check the key
# openssl pkey -in $USER.key -noout -text
# Generate and send me the CSR
# The "user" group is my default group
openssl req -new -key $USER.key -out $USER.csr -subj "/CN=$USER/O=user"
# Check the CSR
# openssl req -in $USER.csr -noout -text
# If satisfactory, sign the CSR
# Copy from /var/lib/rancher/k3s/server/tls/client-ca.crt and client-ca.key
openssl x509 -req -in $USER.csr -CA ../client-ca.crt -CAkey ../client-ca.key -CAcreateserial -out $USER.crt -days 5000
# Review the certificate
# openssl x509 -in $USER.crt -text -noout
# Send back the crt
# cp $USER.crt $USER.key ../server-ca.crt ~/.kube/
kubectl config set-credentials $USER --client-certificate=$USER.crt --client-key=$USER.key
kubectl config set-context $USER --cluster=mainframe --namespace=$USER --user=$USER
# Now we create the namespace, rolebindings, and resource quotas
# kubectl apply -f k8s/
# Add the cluster
# CA file can be found at https://3.14.3.100:6443/cacerts
- cluster:
certificate-authority: server-ca.crt
server: https://3.14.3.100:6443
name: mainframe
# Test if everything worked
kubectl --context=$USER get pods
```

View File

@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: certsigner
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: certsigner
template:
metadata:
labels:
app: certsigner
spec:
containers:
- name: certsigner
image: python:latest
command: ["cat"]
tty: true
resources:
requests:
memory: 1Mi
cpu: 1m
limits:
memory: 100Mi
cpu: 100m
volumeMounts:
- mountPath: /keys
name: keys
- mountPath: /certs
name: certs
volumes:
- name: keys
secret:
secretName: certsigner
- name: certs
emptyDir:
sizeLimit: 500Mi
restartPolicy: Always

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Use
# ./removeuserspace <ssh_address> <server_fqdn (for kubectl)> <user>
export SERVER=$1
export FQDN=$2
export USER=$3
export CERT_DIR=$HOME/.kube/$FQDN/users/$USER
export CA_CERT_DIR=$HOME/.kube/$FQDN
export SERVER_USER_DIR="~/.kube/users/$USER"
export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
export SERVER_USER="$USER-$SERVER_NAME"
export KUBECONFIG="$HOME/.kube/$USER-config"
echo "Checking if project namespace exists"
exists=$(ssh $SERVER "kubectl get namespace --output=jsonpath=\"{.items[?(@.metadata.name=='$USER')].metadata.name}\"")
if [ -z $exists ]; then
echo "Namespace not found, nothing to delete"
exit 1
else
echo "Namespace exists, deleting"
fi
echo "Deleting user namespace"
ssh $SERVER "kubectl delete -f $SERVER_USER_DIR/namespace.yaml"
echo "Deleting remote cert dir"
ssh $SERVER "rm -rf $SERVER_USER_DIR"
echo "Deleting local cert dir"
rm -rf $CERT_DIR
echo "Removing from kubeconfig"
rm $KUBECONFIG

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Use
# ./setup.sh <server_fqdn>
export SERVER=$1
ssh -t $SERVER kubectl -n kube-system create secret generic certsigner --from-file /var/lib/rancher/k3s/server/tls/client-ca.crt --from-file /var/lib/rancher/k3s/server/tls/client-ca.key
scp ./userspace/certsigner.yaml $SERVER:~/certsigner.yaml
ssh $SERVER kubectl apply -f certsigner.yaml

View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Use
# ./upsert.sh <ssh_address> <server_fqdn (for kubectl)> <user>
# Note, do not specify https:// or :port for the fqdn, just give the domain
# Port is expected to be 6443. You can change this later in the generated conf
# ./upsert.sh node1 containers.reeseapps.com testuser
# ./upsert.sh 192.168.1.10 mydomain.ddns.net admin
export SERVER=$1
export FQDN=$2
export KUBE_USER=$3
export CERT_DIR=$HOME/.kube/$FQDN/users/$KUBE_USER
export CA_CERT_DIR=$HOME/.kube/$FQDN
export SERVER_USER_DIR="~/.kube/users/$KUBE_USER"
export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
export SERVER_USER="$KUBE_USER-$SERVER_NAME"
export KUBECONFIG="$HOME/.kube/$KUBE_USER-config"
if [ -z $KUBE_USER ]; then
echo "No arguments supplied! Format is ./upsert.sh <SERVER_FQDN> <USER>"
exit 1
fi
if [ -z $SERVER ]; then
echo "No server supplied for user $KUBE_USER"
exit 1
fi
if [ $KUBE_USER = "admin" ]; then
echo "Creating admin user for server $SERVER"
fi
echo "Creating cert dir"
mkdir -p $CERT_DIR
if [ $? -ne 0 ]; then
echo "Couldn't create cert dir at $CERT_DIR"
exit 1
fi
echo "Generating openssl cert"
podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl genrsa -out /$KUBE_USER/$KUBE_USER.key 2048
if [ $KUBE_USER = "admin" ]; then
podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=system:masters"
else
podman run -it -v $CERT_DIR:/$KUBE_USER python:latest openssl req -new -key /$KUBE_USER/$KUBE_USER.key -out /$KUBE_USER/$KUBE_USER.csr -subj "/CN=$KUBE_USER/O=user"
fi
# /CN=admin/O=manager
if [ $? -ne 0 ]; then
echo "Couldn't create cert with Podman. Are you sure it's running?"
exit 1
fi
echo "Creating namespace dir on server"
ssh $SERVER "mkdir -p $SERVER_USER_DIR"
echo "Copying client csr to server cert dir"
scp $CERT_DIR/$KUBE_USER.csr $SERVER:$SERVER_USER_DIR/$KUBE_USER.csr
if [ $? -ne 0 ]; then
echo "Failed to copy client csr to server cert dir"
exit 1
fi
echo "Getting cert signing pod"
export CERT_POD=$(ssh $SERVER "kubectl get pod -n kube-system --selector=app=certsigner --output=jsonpath={.items..metadata.name}")
if [ -z $CERT_POD ]; then
echo "Installing certsigner"
helm template certsigner ./certsigner | ssh $SERVER "sudo -t -E kubectl apply -f -"
fi
while [ -z $CERT_POD ]; do
echo "Getting cert signing pod"
export CERT_POD=$(ssh $SERVER "kubectl get pod -n kube-system --selector=app=certsigner --output=jsonpath={.items..metadata.name}")
sleep 2
done
if [ $? -ne 0 ]; then
echo "Failed to install certsigner."
exit 1
fi
echo "Signing cert with pod $CERT_POD"
ssh $SERVER "kubectl -n kube-system cp $SERVER_USER_DIR/$KUBE_USER.csr $CERT_POD:/certs/$KUBE_USER.csr"
ssh $SERVER "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$KUBE_USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$KUBE_USER.crt -days 5000"
ssh $SERVER "kubectl -n kube-system cp $CERT_POD:/certs/$KUBE_USER.crt ~/.kube/users/$KUBE_USER/$KUBE_USER.crt"
echo "retrieving signed cert"
scp $SERVER:$SERVER_USER_DIR/$KUBE_USER.crt $CERT_DIR/$KUBE_USER.crt
echo "retrieving server ca"
wget --no-check-certificate https://$FQDN:6443/cacerts -O $CA_CERT_DIR/server-ca.pem
echo "creating $FQDN-$KUBE_USER context"
kubectl config set-context $FQDN-$KUBE_USER
echo "setting $FQDN-$KUBE_USER as current context"
kubectl config set current-context $FQDN-$KUBE_USER
echo "adding server to config with new context $FQDN-$KUBE_USER"
kubectl config set-cluster $FQDN --server=https://$FQDN:6443 --certificate-authority=$CA_CERT_DIR/server-ca.pem
kubectl config set contexts.$(kubectl config current-context).cluster $FQDN
echo "adding user to config file"
kubectl config set-credentials $SERVER_USER --client-certificate=$CERT_DIR/$KUBE_USER.crt --client-key=$CERT_DIR/$KUBE_USER.key
echo "setting user context"
kubectl config set contexts.$(kubectl config current-context).user $SERVER_USER
if [ $KUBE_USER = "admin" ]; then
echo "Admin user created, skipping namespace"
echo "export KUBECONFIG=$KUBECONFIG"
exit 0
fi
echo "Templating namespace with helm and copying to server"
helm template $KUBE_USER --set user=$KUBE_USER ./userspace | ssh $SERVER "cat - > $SERVER_USER_DIR/namespace.yaml"
if [ $? -ne 0 ]; then
echo "Failed to template namespace. Is helm installed?"
exit 1
fi
echo "Creating namespace from template"
ssh $SERVER "kubectl apply -f $SERVER_USER_DIR/namespace.yaml"
echo "Setting namespace context"
kubectl config set contexts.$(kubectl config current-context).namespace $KUBE_USER
if [ $? -ne 0 ]; then
echo "Failed to create namespace"
exit 1
fi
echo "export KUBECONFIG=$KUBECONFIG"

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: LimitRange
metadata:
name: default
namespace: {{ .Release.Name }}
spec:
limits:
- type: Container
default:
memory: 128Mi
cpu: 100m
defaultRequest:
memory: 1Mi
cpu: 1m

View File

@@ -0,0 +1,57 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: namespace-manager
namespace: {{ .Release.Name }}
rules:
- apiGroups:
- ""
- extensions
- apps
- batch
- autoscaling
- networking.k8s.io
- rbac.authorization.k8s.io
- metrics.k8s.io
- policy
- cert-manager.io
resources:
- deployments
- replicasets
- pods
- pods/exec
- pods/log
- pods/attach
- daemonsets
- statefulsets
- replicationcontrollers
- horizontalpodautoscalers
- services
- ingresses
- persistentvolumeclaims
- jobs
- cronjobs
- secrets
- configmaps
- serviceaccounts
- rolebindings
- ingressroutes
- middlewares
- endpoints
- deployments/scale
- poddisruptionbudgets
- certificates
- roles
verbs:
- "*"
- apiGroups:
- ""
- metrics.k8s.io
- rbac.authorization.k8s.io
- policy
resources:
- resourcequotas
- roles
verbs:
- list
- get

View File

@@ -0,0 +1,13 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: namespace-manager
namespace: {{ .Release.Name }}
subjects:
- kind: User
name: {{ .Values.user }}
apiGroup: ""
roleRef:
kind: Role
name: namespace-manager
apiGroup: ""

View File

@@ -0,0 +1,46 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: namespace-readonly
namespace: {{ .Release.Name }}
rules:
- apiGroups:
- ""
- extensions
- apps
- batch
- autoscaling
- networking.k8s.io
- traefik.containo.us
- rbac.authorization.k8s.io
- metrics.k8s.io
- storage.k8s.io
resources:
- deployments
- replicasets
- pods
- pods/exec
- pods/log
- pods/attach
- daemonsets
- statefulsets
- replicationcontrollers
- horizontalpodautoscalers
- services
- ingresses
- persistentvolumeclaims
- jobs
- cronjobs
- secrets
- configmaps
- serviceaccounts
- rolebindings
- ingressroutes
- middlewares
- resourcequotas
- roles
- endpoints
- clusterroles
verbs:
- list
- watch

View File

@@ -0,0 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: user-readonly
subjects:
- kind: User
name: {{ .Values.user }}
apiGroup: ""
roleRef:
kind: ClusterRole
name: user-readonly
apiGroup: ""

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Name }}

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ResourceQuota
metadata:
name: default
namespace: {{ .Release.Name }}
spec:
hard:
requests.cpu: "8"
requests.memory: "8Gi"
limits.cpu: "16"
limits.memory: "16Gi"
requests.storage: "500Gi"

View File

@@ -0,0 +1 @@
user: admin

View File

@@ -0,0 +1,51 @@
service:
# Don't use an external IP address
type: ClusterIP
ingress:
enabled: true
pathType: Prefix
# Change this
hostname: wordpress.reeseapps.com
annotations:
# Get a cert from letsencrypt
cert-manager.io/cluster-issuer: letsencrypt
# Use the nginx ingress class
kubernetes.io/ingress.class: nginx
# Allow infinitely large uploads (change this)
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.org/client-max-body-size: "0"
tls: true
resources:
# namespaces have strict "request" requirements
requests:
cpu: 100m
memory: 128Mi
  # limits are enforced less strictly than requests - allow some breathing room
limits:
cpu: 2
memory: 1Gi
updateStrategy:
  # Since the default storage is a single-node mount we can't
  # use the typical rolling update strategy: the new pod might
  # try to start on a node that doesn't have the storage
  # mounted. We get around this by tearing down the old pod
  # before spinning up the new one. That means a brief window
  # of downtime; switching the default storage class would
  # avoid it.
type: Recreate
# These tolerations ensure that if one of my nodes goes down
# for some reason, your pods will jump to the next available
# node within 1 second of it becoming unreachable.
tolerations:
- key: "node.kubernetes.io/unreachable"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoExecute"
tolerationSeconds: 1
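# Example usage (a sketch - it assumes these values target the Bitnami WordPress
# chart, which matches the keys above; adjust the chart and release name to taste):
#   helm install wordpress bitnami/wordpress -f values.yaml --namespace <your-namespace>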

View File

@@ -0,0 +1,6 @@
# Democratic CSI
## Retirement
This deployment has been retired in favor of the local storage provided by Rancher. Local storage is
much better for single-node clusters (which is all I run right now) since it's far more performant.
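
For reference, here is a minimal sketch of a claim against Rancher's local storage (assuming the
local-path-provisioner is installed and exposes its default `local-path` storage class, as it does on k3s):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data           # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce            # local-path volumes live on a single node
  storageClassName: local-path
  resources:
    requests:
      storage: 5Gi
```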

View File

@@ -0,0 +1,69 @@
csiDriver:
name: "driveripper.zfs-gen-nfs-enc1"
storageClasses:
- name: zfs-gen-nfs-enc1
defaultClass: false
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
fsType: nfs
mountOptions:
- async
- noatime
secrets:
provisioner-secret:
controller-publish-secret:
node-stage-secret:
node-publish-secret:
controller-expand-secret:
volumeSnapshotClasses: []
driver:
config:
driver: zfs-generic-nfs
sshConnection:
host: driveripper.reeseapps.com
port: 22
username: democratic
privateKey: ""
zfs:
cli:
sudoEnabled: true
paths:
zfs: /usr/sbin/zfs
zpool: /usr/sbin/zpool
sudo: /usr/bin/sudo
chroot: /usr/sbin/chroot
datasetProperties:
"org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
datasetParentName: enc1/dcsi/nfs
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
# datasetPermissionsAcls:
# - "-m everyone@:full_set:allow"
#- "-m u:kube:full_set:allow"
nfs:
# https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
# https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
#sharenfs: "rw,no_subtree_check,no_root_squash"
sharenfs: "on"
# share: ""
shareHost: "driveripper.reeselink.com"

View File

@@ -0,0 +1,85 @@
csiDriver:
name: "driveripper.zfs-iscsi-enc0"
# add note here about volume expansion requirements
storageClasses:
- name: zfs-iscsi-enc0
defaultClass: true
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
# for block-based storage can be ext3, ext4, xfs
fsType: xfs
mountOptions: []
secrets:
provisioner-secret:
controller-publish-secret:
node-stage-secret:
node-publish-secret:
controller-expand-secret:
volumeSnapshotClasses: []
driver:
config:
driver: freenas-api-iscsi
instance_id:
httpConnection:
protocol: https
host: driveripper.reeseapps.com
port: 443
apiKey: ""
allowInsecure: false
apiVersion: 2
zfs:
datasetProperties:
"org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
datasetParentName: enc0/dcsi/apps
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: enc0/dcsi/snaps
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
targetPortal: "democratic-csi-server.reeselink.com"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface:
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix: "dcsi-"
nameSuffix: "-enc0"
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
- targetGroupPortalGroup: 7
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1
# None, CHAP, or CHAP Mutual
targetGroupAuthType: None
# get the correct ID from the "Authorized Access" section of the UI
      # only required if using CHAP
targetGroupAuthGroup:
extentCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
extentInsecureTpc: true
extentXenCompat: false
extentDisablePhysicalBlocksize: true
# 512, 1024, 2048, or 4096,
extentBlocksize: 512
# "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
extentRpm: "SSD"
# 0-100 (0 == ignore)
extentAvailThreshold: 0

View File

@@ -0,0 +1,84 @@
csiDriver:
name: "driveripper.zfs-iscsi-enc1"
# add note here about volume expansion requirements
storageClasses:
- name: zfs-iscsi-enc1
defaultClass: false
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
# for block-based storage can be ext3, ext4, xfs
fsType: xfs
mountOptions: []
secrets:
provisioner-secret:
controller-publish-secret:
node-stage-secret:
node-publish-secret:
controller-expand-secret:
volumeSnapshotClasses: []
driver:
config:
driver: freenas-api-iscsi
instance_id:
httpConnection:
protocol: https
host: driveripper.reeseapps.com
port: 443
apiKey: ""
allowInsecure: false
zfs:
datasetProperties:
"org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
datasetParentName: enc1/dcsi/apps
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
targetPortal: "democratic-csi-server.reeselink.com"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface:
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix: "dcsi-"
nameSuffix: "-enc1"
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
- targetGroupPortalGroup: 7
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1
# None, CHAP, or CHAP Mutual
targetGroupAuthType: None
# get the correct ID from the "Authorized Access" section of the UI
      # only required if using CHAP
targetGroupAuthGroup:
extentCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
extentInsecureTpc: true
extentXenCompat: false
extentDisablePhysicalBlocksize: true
# 512, 1024, 2048, or 4096,
extentBlocksize: 512
# "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
extentRpm: "SSD"
# 0-100 (0 == ignore)
extentAvailThreshold: 0

View File

@@ -0,0 +1,53 @@
csiDriver:
name: "driveripper.zfs-nfs-enc1"
storageClasses:
- name: zfs-nfs-enc1
defaultClass: false
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
fsType: nfs
mountOptions:
- async
- noatime
secrets:
provisioner-secret:
controller-publish-secret:
node-stage-secret:
node-publish-secret:
controller-expand-secret:
volumeSnapshotClasses: []
driver:
config:
driver: freenas-api-nfs
instance_id:
httpConnection:
protocol: https
host: driveripper.reeseapps.com
port: 443
apiKey: ""
allowInsecure: true
zfs:
datasetProperties:
"org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
datasetParentName: enc1/dcsi/nfs
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: enc1/dcsi/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: democratic-csi-server.reeselink.com
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks:
- "fd00:fd41:d0f1:1010::0/64"
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""