moving everything to active or retired vs incubating and graduated

2025-04-19 18:46:40 -04:00
parent 6e393d90ee
commit ef9104c796
234 changed files with 456 additions and 244 deletions

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,23 @@
apiVersion: v2
name: namespace
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View File

@@ -0,0 +1,40 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: certsigner
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: certsigner
  template:
    metadata:
      labels:
        app: certsigner
    spec:
      containers:
        - name: certsigner
          # python:latest ships with openssl; cat + tty keep the container alive
          image: python:latest
          command: ["cat"]
          tty: true
          resources:
            requests:
              memory: 1Mi
              cpu: 1m
            limits:
              memory: 100Mi
              cpu: 100m
          volumeMounts:
            # client-ca.crt/key mounted from the certsigner secret
            - mountPath: /keys
              name: keys
            # scratch space for csrs and signed certs
            - mountPath: /certs
              name: certs
      volumes:
        - name: keys
          secret:
            secretName: certsigner
        - name: certs
          emptyDir:
            sizeLimit: 500Mi
      restartPolicy: Always

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Use
# ./removeuserspace <ssh_address> <server_fqdn (for kubectl)> <user>
export SERVER=$1
export FQDN=$2
export KUBE_USER=$3   # named KUBE_USER so the shell's own $USER isn't clobbered
export CERT_DIR=$HOME/.kube/$FQDN/users/$KUBE_USER
export CA_CERT_DIR=$HOME/.kube/$FQDN
export SERVER_USER_DIR="~/.kube/users/$KUBE_USER"   # the ~ is expanded by the remote shell
export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
export SERVER_USER="$KUBE_USER-$SERVER_NAME"
export KUBECONFIG="$HOME/.kube/$KUBE_USER-config"
echo "Checking if project namespace exists"
exists=$(ssh "$SERVER" "kubectl get namespace --output=jsonpath=\"{.items[?(@.metadata.name=='$KUBE_USER')].metadata.name}\"")
if [ -z "$exists" ]; then
  echo "Namespace not found, nothing to delete"
  exit 1
else
  echo "Namespace exists, deleting"
fi
echo "Deleting user namespace"
ssh "$SERVER" "kubectl delete -f $SERVER_USER_DIR/namespace.yaml"
echo "Deleting remote cert dir"
ssh "$SERVER" "rm -rf $SERVER_USER_DIR"
echo "Deleting local cert dir"
rm -rf "$CERT_DIR"
echo "Removing from kubeconfig"
rm -f "$KUBECONFIG"

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Use
# ./setup.sh <ssh_address>
export SERVER=$1
# The client CA crt and key live on the k3s server, so the secret is created remotely
ssh -t "$SERVER" kubectl -n kube-system create secret generic certsigner --from-file /var/lib/rancher/k3s/server/tls/client-ca.crt --from-file /var/lib/rancher/k3s/server/tls/client-ca.key
scp ./userspace/certsigner.yaml "$SERVER:~/certsigner.yaml"
ssh "$SERVER" kubectl apply -f certsigner.yaml

View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Use
# ./upsert.sh <ssh_address> <server_fqdn (for kubectl)> <user>
# Note, do not specify https:// or :port for the fqdn, just give the domain.
# Port is expected to be 6443. You can change this later in the generated conf.
# ./upsert.sh node1 containers.reeseapps.com testuser
# ./upsert.sh 192.168.1.10 mydomain.ddns.net admin
export SERVER=$1
export FQDN=$2
export KUBE_USER=$3
export CERT_DIR=$HOME/.kube/$FQDN/users/$KUBE_USER
export CA_CERT_DIR=$HOME/.kube/$FQDN
export SERVER_USER_DIR="~/.kube/users/$KUBE_USER"   # the ~ is expanded by the remote shell
export SERVER_NAME=$(echo "$FQDN" | sed 's/\./-/g')
export SERVER_USER="$KUBE_USER-$SERVER_NAME"
export KUBECONFIG="$HOME/.kube/$KUBE_USER-config"
if [ -z "$KUBE_USER" ]; then
  echo "No arguments supplied! Format is ./upsert.sh <ssh_address> <server_fqdn> <user>"
  exit 1
fi
if [ -z "$SERVER" ]; then
  echo "No server supplied for user $KUBE_USER"
  exit 1
fi
if [ "$KUBE_USER" = "admin" ]; then
  echo "Creating admin user for server $SERVER"
fi
echo "Creating cert dir"
mkdir -p "$CERT_DIR"
if [ $? -ne 0 ]; then
  echo "Couldn't create cert dir at $CERT_DIR"
  exit 1
fi
echo "Generating openssl cert"
podman run -it -v "$CERT_DIR:/$KUBE_USER" python:latest openssl genrsa -out "/$KUBE_USER/$KUBE_USER.key" 2048
# The admin cert goes in system:masters; everyone else gets the default "user" group
if [ "$KUBE_USER" = "admin" ]; then
  podman run -it -v "$CERT_DIR:/$KUBE_USER" python:latest openssl req -new -key "/$KUBE_USER/$KUBE_USER.key" -out "/$KUBE_USER/$KUBE_USER.csr" -subj "/CN=$KUBE_USER/O=system:masters"
else
  podman run -it -v "$CERT_DIR:/$KUBE_USER" python:latest openssl req -new -key "/$KUBE_USER/$KUBE_USER.key" -out "/$KUBE_USER/$KUBE_USER.csr" -subj "/CN=$KUBE_USER/O=user"
fi
if [ $? -ne 0 ]; then
  echo "Couldn't create cert with Podman. Are you sure it's running?"
  exit 1
fi
echo "Creating namespace dir on server"
ssh "$SERVER" "mkdir -p $SERVER_USER_DIR"
echo "Copying client csr to server cert dir"
scp "$CERT_DIR/$KUBE_USER.csr" "$SERVER:$SERVER_USER_DIR/$KUBE_USER.csr"
if [ $? -ne 0 ]; then
  echo "Failed to copy client csr to server cert dir"
  exit 1
fi
echo "Getting cert signing pod"
export CERT_POD=$(ssh "$SERVER" "kubectl get pod -n kube-system --selector=app=certsigner --output=jsonpath={.items..metadata.name}")
if [ -z "$CERT_POD" ]; then
  echo "Installing certsigner"
  # Assumes passwordless sudo on the server
  helm template certsigner ./certsigner | ssh "$SERVER" "sudo -E kubectl apply -f -"
  if [ $? -ne 0 ]; then
    echo "Failed to install certsigner."
    exit 1
  fi
fi
# Wait for the pod to be scheduled
while [ -z "$CERT_POD" ]; do
  echo "Getting cert signing pod"
  export CERT_POD=$(ssh "$SERVER" "kubectl get pod -n kube-system --selector=app=certsigner --output=jsonpath={.items..metadata.name}")
  sleep 2
done
echo "Signing cert with pod $CERT_POD"
ssh "$SERVER" "kubectl -n kube-system cp $SERVER_USER_DIR/$KUBE_USER.csr $CERT_POD:/certs/$KUBE_USER.csr"
# A random serial keeps reissued certs for the same CN distinct
ssh "$SERVER" "kubectl -n kube-system exec $CERT_POD -- openssl x509 -in /certs/$KUBE_USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -set_serial $(python -c 'import random; print(random.randint(1000000000, 9999999999))') -out /certs/$KUBE_USER.crt -days 5000"
ssh "$SERVER" "kubectl -n kube-system cp $CERT_POD:/certs/$KUBE_USER.crt $SERVER_USER_DIR/$KUBE_USER.crt"
echo "Retrieving signed cert"
scp "$SERVER:$SERVER_USER_DIR/$KUBE_USER.crt" "$CERT_DIR/$KUBE_USER.crt"
echo "Retrieving server ca"
wget --no-check-certificate "https://$FQDN:6443/cacerts" -O "$CA_CERT_DIR/server-ca.pem"
echo "Creating $FQDN-$KUBE_USER context"
kubectl config set-context "$FQDN-$KUBE_USER"
echo "Setting $FQDN-$KUBE_USER as current context"
kubectl config set current-context "$FQDN-$KUBE_USER"
echo "Adding server to config with new context $FQDN-$KUBE_USER"
kubectl config set-cluster "$FQDN" --server="https://$FQDN:6443" --certificate-authority="$CA_CERT_DIR/server-ca.pem"
kubectl config set "contexts.$(kubectl config current-context).cluster" "$FQDN"
echo "Adding user to config file"
kubectl config set-credentials "$SERVER_USER" --client-certificate="$CERT_DIR/$KUBE_USER.crt" --client-key="$CERT_DIR/$KUBE_USER.key"
echo "Setting user context"
kubectl config set "contexts.$(kubectl config current-context).user" "$SERVER_USER"
if [ "$KUBE_USER" = "admin" ]; then
  echo "Admin user created, skipping namespace"
  echo "export KUBECONFIG=$KUBECONFIG"
  exit 0
fi
echo "Templating namespace with helm and copying to server"
helm template "$KUBE_USER" --set user="$KUBE_USER" ./userspace | ssh "$SERVER" "cat - > $SERVER_USER_DIR/namespace.yaml"
if [ $? -ne 0 ]; then
  echo "Failed to template namespace. Is helm installed?"
  exit 1
fi
echo "Creating namespace from template"
ssh "$SERVER" "kubectl apply -f $SERVER_USER_DIR/namespace.yaml"
if [ $? -ne 0 ]; then
  echo "Failed to create namespace"
  exit 1
fi
echo "Setting namespace context"
kubectl config set "contexts.$(kubectl config current-context).namespace" "$KUBE_USER"
echo "export KUBECONFIG=$KUBECONFIG"

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: default
  namespace: {{ .Release.Name }}
spec:
  limits:
    - type: Container
      default:
        memory: 128Mi
        cpu: 100m
      defaultRequest:
        memory: 1Mi
        cpu: 1m

View File

@@ -0,0 +1,57 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: namespace-manager
  namespace: {{ .Release.Name }}
rules:
  - apiGroups:
      - ""
      - extensions
      - apps
      - batch
      - autoscaling
      - networking.k8s.io
      - rbac.authorization.k8s.io
      - metrics.k8s.io
      - policy
      - cert-manager.io
    resources:
      - deployments
      - replicasets
      - pods
      - pods/exec
      - pods/log
      - pods/attach
      - daemonsets
      - statefulsets
      - replicationcontrollers
      - horizontalpodautoscalers
      - services
      - ingresses
      - persistentvolumeclaims
      - jobs
      - cronjobs
      - secrets
      - configmaps
      - serviceaccounts
      - rolebindings
      - ingressroutes
      - middlewares
      - endpoints
      - deployments/scale
      - poddisruptionbudgets
      - certificates
      - roles
    verbs:
      - "*"
  - apiGroups:
      - ""
      - metrics.k8s.io
      - rbac.authorization.k8s.io
      - policy
    resources:
      - resourcequotas
      - roles
    verbs:
      - list
      - get

View File

@@ -0,0 +1,13 @@
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: namespace-manager
  namespace: {{ .Release.Name }}
subjects:
  - kind: User
    name: {{ .Values.user }}
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: namespace-manager
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,46 @@
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: namespace-readonly
  namespace: {{ .Release.Name }}
rules:
  - apiGroups:
      - ""
      - extensions
      - apps
      - batch
      - autoscaling
      - networking.k8s.io
      - traefik.containo.us
      - rbac.authorization.k8s.io
      - metrics.k8s.io
      - storage.k8s.io
    resources:
      - deployments
      - replicasets
      - pods
      - pods/exec
      - pods/log
      - pods/attach
      - daemonsets
      - statefulsets
      - replicationcontrollers
      - horizontalpodautoscalers
      - services
      - ingresses
      - persistentvolumeclaims
      - jobs
      - cronjobs
      - secrets
      - configmaps
      - serviceaccounts
      - rolebindings
      - ingressroutes
      - middlewares
      - resourcequotas
      - roles
      - endpoints
      - clusterroles
    verbs:
      - list
      - watch

View File

@@ -0,0 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # Suffixed with the release name so one user's binding doesn't overwrite another's
  name: {{ .Release.Name }}-user-readonly
subjects:
  - kind: User
    name: {{ .Values.user }}
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: user-readonly
  apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Release.Name }}

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: default
  namespace: {{ .Release.Name }}
spec:
  hard:
    requests.cpu: "8"
    requests.memory: "8Gi"
    limits.cpu: "16"
    limits.memory: "16Gi"
    requests.storage: "500Gi"

View File

@@ -0,0 +1,210 @@
# Project Userspace
## One provisioner to rule them all
### Quickstart
```bash
./userspace/scripts/setup.sh <ssh_address>
./userspace/scripts/upsert.sh <ssh_address> <server_fqdn> <user>
./userspace/scripts/removeuserspace <ssh_address> <server_fqdn> <user>
```
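For example, with the sample values from the upsert script's header (node1 as the ssh address, containers.reeseapps.com as the fqdn):
```bash
./userspace/scripts/setup.sh node1
./userspace/scripts/upsert.sh node1 containers.reeseapps.com testuser
# Later, to tear the userspace back down:
./userspace/scripts/removeuserspace node1 containers.reeseapps.com testuser
```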
### Update a user
```bash
export USER=<user>
helm template $USER --set user=$USER ./namespace | kubectl --context admin apply -f -
```
### Objectives
1. Provision a namespace with clusterroles, rolebindings, and a dedicated nfs-provisioner with one helm chart (a preview sketch follows this list)
2. Create an easy way for users to sign their certificates
3. Create a cleanup script that removes the userspace without deleting user data
4. Profit
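To preview what the chart renders for a user before applying anything, a dry-run sketch (assuming the chart directory is ./userspace, as in upsert.sh):
```bash
# Render the chart locally and list the kinds it would create
helm template testuser --set user=testuser ./userspace | grep '^kind:'
# Expect roughly: Namespace, LimitRange, ResourceQuota, Role(s), RoleBinding, ClusterRoleBinding
```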
### Userspace
#### Namespace
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: {{ .Release.Name }}
```
#### Roles
```yaml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: namespace-manager
  namespace: {{ .Release.Name }}
rules:
  - apiGroups:
      - ""
      - extensions
      - apps
      - batch
      - autoscaling
      - networking.k8s.io
      - traefik.containo.us
      - rbac.authorization.k8s.io
      - metrics.k8s.io
    resources:
      - deployments
      - replicasets
      - pods
      - pods/exec
      - pods/log
      - pods/attach
      - daemonsets
      - statefulsets
      - replicationcontrollers
      - horizontalpodautoscalers
      - services
      - ingresses
      - persistentvolumeclaims
      - jobs
      - cronjobs
      - secrets
      - configmaps
      - serviceaccounts
      - rolebindings
      - ingressroutes
      - middlewares
      - endpoints
    verbs:
      - "*"
  - apiGroups:
      - ""
      - metrics.k8s.io
      - rbac.authorization.k8s.io
    resources:
      - resourcequotas
      - roles
    verbs:
      - list
```
#### Rolebinding
```yaml
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: {{ .Release.Name }}
  name: namespace-manager
subjects:
  - kind: User
    name: {{ .Values.user }}
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: namespace-manager
  apiGroup: rbac.authorization.k8s.io
```
### Create a Kubernetes certsigner pod
This keeps the client CA crt and key in a Secret so certs can be signed and stored on the pod, and the CA key never has to leave the cluster.
#### Create the certsigner secret
```bash
kubectl -n kube-system create secret generic certsigner --from-file /var/lib/rancher/k3s/server/tls/client-ca.crt --from-file /var/lib/rancher/k3s/server/tls/client-ca.key
```
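To sanity-check that the secret landed with both files (a quick verification, not part of the original flow):
```bash
# Lists the data keys and sizes without printing the key material
kubectl --context admin -n kube-system describe secret certsigner
```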
#### Set up the certsigner pod
```bash
kubectl --context admin apply -f certsigner
```
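Before signing anything, it may be worth confirming the pod is actually up (the deployment lands in kube-system):
```bash
kubectl --context admin -n kube-system rollout status deployment/certsigner
```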
#### Generate a cert
```bash
export USER=<user>
docker run -it -v $(pwd)/users/$USER:/$USER python:latest openssl genrsa -out /$USER/$USER.key 2048
docker run -it -v $(pwd)/users/$USER:/$USER python:latest openssl req -new -key /$USER/$USER.key -out /$USER/$USER.csr -subj "/CN=$USER/O=user"
```
#### Create a new Userspace
```bash
helm template $USER --set user=$USER ./namespace | kubectl --context admin apply -f -
```
#### Sign the cert
```bash
export USER=<user>
# The pod name is assumed to be certsigner; if it was created by the Deployment,
# look up the generated name with: kubectl -n kube-system get pod -l app=certsigner
kubectl --context admin -n kube-system cp $(pwd)/users/$USER/$USER.csr certsigner:/certs/$USER.csr
kubectl --context admin -n kube-system exec -it certsigner -- openssl x509 -in /certs/$USER.csr -req -CA /keys/client-ca.crt -CAkey /keys/client-ca.key -CAcreateserial -out /certs/$USER.crt -days 5000
kubectl --context admin -n kube-system cp certsigner:/certs/$USER.crt $(pwd)/users/$USER/$USER.crt
```
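To confirm the signed cert came back with the expected subject and lifetime (a quick check, not part of the original flow):
```bash
# CN should be the username, O the group from the CSR
openssl x509 -in $(pwd)/users/$USER/$USER.crt -noout -subject -dates
```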
#### Add to the config
```bash
kubectl config set-credentials $USER --client-certificate=$USER.crt --client-key=$USER.key
kubectl config set-context $USER --cluster=mainframe --namespace=$USER --user=$USER
```
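The new context pins both the user and the namespace, so a quick smoke test needs no extra flags; the second command should fail because the manager role is namespaced:
```bash
kubectl --context $USER get pods
# Expect a Forbidden error outside the user's namespace:
kubectl --context $USER get pods -n kube-system
```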
#### Delete
```bash
kubectl config delete-context $USER
helm template $USER ./namespace | kubectl --context admin delete -f -
```
### Signing a user cert - detailed notes
NOTE: ca.crt and ca.key are in /var/lib/rancher/k3s/server/tls/client-ca.*
```bash
# First we create the credentials
# /CN=<username> - the user
# /O=<group> - the group
# Navigate to the user directory
export USER=<username>
cd $USER
# Generate a private key
openssl genrsa -out $USER.key 2048
# Check the key
# openssl pkey -in $USER.key -noout -text
# Generate and send me the CSR
# The "user" group is my default group
openssl req -new -key $USER.key -out $USER.csr -subj "/CN=$USER/O=user"
# Check the CSR
# openssl req -in $USER.csr -noout -text
# If satisfactory, sign the CSR
# Copy from /var/lib/rancher/k3s/server/tls/client-ca.crt and client-ca.key
openssl x509 -req -in $USER.csr -CA ../client-ca.crt -CAkey ../client-ca.key -CAcreateserial -out $USER.crt -days 5000
# Review the certificate
# openssl x509 -in $USER.crt -text -noout
# Send back the crt
# cp $USER.crt $USER.key ../server-ca.crt ~/.kube/
kubectl config set-credentials $USER --client-certificate=$USER.crt --client-key=$USER.key
kubectl config set-context $USER --cluster=mainframe --namespace=$USER --user=$USER
# Now we create the namespace, rolebindings, and resource quotas
# kubectl apply -f k8s/
```
Add the cluster to the kubeconfig; the CA file can be found at https://3.14.3.100:6443/cacerts:
```yaml
- cluster:
    certificate-authority: server-ca.crt
    server: https://3.14.3.100:6443
  name: mainframe
```
```bash
# Test if everything worked
kubectl --context=$USER get pods
```
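Instead of hand-editing the kubeconfig, the same cluster entry can be added with kubectl, which is how upsert.sh does it:
```bash
# Equivalent to the YAML snippet above
kubectl config set-cluster mainframe --server=https://3.14.3.100:6443 --certificate-authority=server-ca.crt
# Point the user's context at the new cluster entry
kubectl config set contexts.$USER.cluster mainframe
```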

View File

@@ -0,0 +1 @@
user: admin