update general

Nico Schottelius 2021-06-14 19:38:45 +02:00
parent 71c3ee1d54
commit e2520ce826
5 changed files with 178 additions and 8 deletions


@@ -21,3 +21,19 @@ This project is testing, deploying and using IPv6 only k8s clusters.
* argocd (?) for CI and upgrades
* Maybe LoadBalancer support (our ClusterIP already does that though)
* (Other) DNS entries for services
## Cluster setup
* Calico CNI with BGP peering to our upstream infrastructure (see the sketch below)
* Rook for RBD and CephFS support
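For the BGP peering mentioned above, Calico models each upstream session as a BGPPeer resource. A minimal sketch (the peer address and AS number are placeholders, not our real values):
```
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: upstream-peer
spec:
  # Placeholder values -- replace with the router address and AS number
  # of the infrastructure you actually peer with.
  peerIP: 2a0a:e5c0::1
  asNumber: 65530
```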
### Init
We are using a custom kubeadm.conf to
* configure the cgroupdriver
* configure the IP addresses
```
kubeadm init --config v3-calico/kubeadm-config-p7-v2.yaml
```
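The config file itself is not reproduced in full here; as a sketch, the cgroupdriver part is usually set through an extra KubeletConfiguration document appended to the same file (assuming the systemd driver, which kubeadm recommends on systemd-based distributions):
```
# Sketch only -- the real settings live in v3-calico/kubeadm-config-p7-v2.yaml.
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
```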


@@ -50,17 +50,18 @@ spec:
apiVersion: v1
kind: Pod
metadata:
  name: alpine-cert-test
spec:
  containers:
  - name: alpine
    image: alpine:3.13
    args:
    - sleep
    - "1000000"
    volumeMounts:
    - mountPath: "/etc/letsencrypt"
      name: letsencryptdir
    - mountPath: "/www"
      name: webroot
  volumes:
  - name: letsencryptdir
@@ -71,6 +72,57 @@ spec:
      claimName: nginx-webroot
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx-port-80
  labels:
    app: ssl
spec:
  containers:
  - name: nginx
    image: nginx:1.21-alpine
    volumeMounts:
    - mountPath: "/etc/letsencrypt"
      name: letsencryptdir
    - mountPath: "/usr/share/nginx/html"
      name: webroot
    ports:
    - containerPort: 80
  volumes:
  - name: letsencryptdir
    persistentVolumeClaim:
      claimName: nginx-certs
  - name: webroot
    persistentVolumeClaim:
      claimName: nginx-webroot
# ---
# apiVersion: v1
# kind: Pod
# metadata:
#   name: nginx-ssl
#   labels:
#     app: nginx-ssl
#     ssl: yes
# spec:
#   containers:
#   - name: nginx
#     image: nginx:1.21-alpine
#     volumeMounts:
#     - mountPath: "/etc/letsencrypt"
#       name: letsencryptdir
#     - mountPath: "/usr/share/nginx/html"
#       name: webroot
#     ports:
#     - containerPort: 443
#   volumes:
#   - name: letsencryptdir
#     persistentVolumeClaim:
#       claimName: nginx-certs
#   - name: webroot
#     persistentVolumeClaim:
#       claimName: nginx-webroot
---
apiVersion: v1
kind: Service
metadata:
  name: svc1
@@ -78,5 +130,25 @@ spec:
  type: ClusterIP
  ports:
  - port: 80
    name: "http"
  - port: 443
    name: "https"
  selector:
    app: ssl
# ---
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: nginx-ssl-config
# data:
#   # property-like keys; each key maps to a simple value
#   nick_name: "13"
#   # file-like keys
#   game.properties: |
#     enemy.types=aliens,monsters
#     player.maximum-lives=5
#   user-interface.properties: |
#     color.good=purple
#     color.bad=yellow
#     allow.textmode=true
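The alpine-cert-test pod above mounts the same certificate and webroot volumes as the nginx pod, so it can be used to inspect certificates or to run an ACME client against the shared webroot. A hypothetical check (the certbot invocation and domain are illustrative, not taken from this setup):
```
kubectl exec -it alpine-cert-test -- ls -l /etc/letsencrypt/live
# e.g. issue a certificate via the shared webroot:
kubectl exec -it alpine-cert-test -- sh -c \
  "apk add certbot && certbot certonly --webroot -w /www -d example.com"
```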


@@ -22,6 +22,15 @@ for yaml in crds common operator cluster storageclass toolbox; do
done
```
Deleting (in case of teardown; delete in reverse order of creation, so the cluster is removed before the operator and the CRDs):
```
for yaml in toolbox storageclass cluster operator common crds; do
  kubectl delete -f ${yaml}.yaml
done
```
## Debugging / ceph toolbox
```
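# Example (sketch; assumes the standard rook-ceph-tools deployment from toolbox.yaml):
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd tree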

rook/osd-purge.yaml (new file)

@@ -0,0 +1,73 @@
#################################################################################################################
# Removing OSDs requires many operations, as described in Documentation/ceph-osd-mgmt.md.
# This job can automate some of those operations: marking OSDs as `out`, purging them,
# and deleting the corresponding resources such as OSD deployments, OSD prepare jobs, and PVCs.
#
# Please note the following.
#
# - This job only works for `down` OSDs.
# - This job doesn't wait for backfilling to be completed.
#
# If you want to remove `up` OSDs and/or want to wait for backfilling to be completed between each OSD removal,
# please do it by hand.
#################################################################################################################
apiVersion: batch/v1
kind: Job
metadata:
  name: rook-ceph-purge-osd
  namespace: rook-ceph # namespace:operator
  labels:
    app: rook-ceph-purge-osd
spec:
  template:
    spec:
      serviceAccountName: rook-ceph-system
      containers:
      - name: osd-removal
        image: rook/ceph:master
        # TODO: Insert the OSD ID in the last parameter that is to be removed
        # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
        args: ["ceph", "osd", "remove", "--osd-ids", "3,1,4,2,5"]
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: ROOK_MON_ENDPOINTS
          valueFrom:
            configMapKeyRef:
              key: data
              name: rook-ceph-mon-endpoints
        - name: ROOK_CEPH_USERNAME
          valueFrom:
            secretKeyRef:
              key: ceph-username
              name: rook-ceph-mon
        - name: ROOK_CEPH_SECRET
          valueFrom:
            secretKeyRef:
              key: ceph-secret
              name: rook-ceph-mon
        - name: ROOK_CONFIG_DIR
          value: /var/lib/rook
        - name: ROOK_CEPH_CONFIG_OVERRIDE
          value: /etc/rook/config/override.conf
        - name: ROOK_FSID
          valueFrom:
            secretKeyRef:
              key: fsid
              name: rook-ceph-mon
        - name: ROOK_LOG_LEVEL
          value: DEBUG
        volumeMounts:
        - mountPath: /etc/ceph
          name: ceph-conf-emptydir
        - mountPath: /var/lib/rook
          name: rook-config
      volumes:
      - emptyDir: {}
        name: ceph-conf-emptydir
      - emptyDir: {}
        name: rook-config
      restartPolicy: Never
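A sketch of how this job would be used, assuming --osd-ids has been edited to list the OSDs that are actually down:
```
kubectl create -f rook/osd-purge.yaml
kubectl -n rook-ceph logs -f job/rook-ceph-purge-osd
```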


@@ -1,7 +1,7 @@
# kubeadm-config.yaml
kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: v1.21.1
networking:
  dnsDomain: k8s.place7.ungleich.ch
  podSubnet: 2a0a:e5c0:13:e1::/64
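The bump from v1.21.0 to v1.21.1 corresponds to the usual kubeadm upgrade flow on the control plane, sketched here under the assumption that the kubeadm and kubelet packages have already been updated to 1.21.1:
```
kubeadm upgrade plan
kubeadm upgrade apply v1.21.1
```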