Add v1 (thanks to a good friend)

Signed-off-by: Nico Schottelius <nico@nico-notebook.schottelius.org>
Nico Schottelius 2021-03-21 18:12:20 +01:00
commit 4984b3c51a
5 changed files with 217 additions and 0 deletions

v1/README.md (new file, 138 lines)

@@ -0,0 +1,138 @@
# Kubernetes on Alpine Linux

Note: the kubeadm and cilium config files were created for use with asus3.place10.ungleich.ch.
## Configure OS
```
# Keep accepting router advertisements even with forwarding enabled,
# and enable IPv6/IPv4 forwarding for pod traffic
sysctl -w net.ipv6.conf.all.accept_ra=2
sysctl -w net.ipv6.conf.all.forwarding=1
sysctl -w net.ipv4.ip_forward=1
# Needed so bridged pod traffic traverses ip(6)tables
modprobe br_netfilter
apk update
apk add ip6tables
apk add git
# for cilium: shared mount propagation on /sys and a mounted BPF filesystem
mount --make-shared /sys
mount bpffs /sys/fs/bpf -t bpf
```
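These settings do not survive a reboot. A minimal sketch of persisting them, assuming Alpine's usual handling of /etc/sysctl.d, /etc/modules and /etc/fstab (the drop-in file name is illustrative):

```
# hypothetical drop-in; Alpine's sysctl service applies /etc/sysctl.d/*.conf at boot
cat > /etc/sysctl.d/99-kubernetes.conf << DONE
net.ipv6.conf.all.accept_ra = 2
net.ipv6.conf.all.forwarding = 1
net.ipv4.ip_forward = 1
DONE
# load br_netfilter at boot
echo br_netfilter >> /etc/modules
# mount the BPF filesystem at boot
echo "bpffs /sys/fs/bpf bpf defaults 0 0" >> /etc/fstab
```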
---
## Install and configure container runtime
```
apk add cri-o
cat > /etc/crio/crio.conf.d/override.conf << DONE
[crio.runtime]
# pivot_root does not work on tmpfs
no_pivot = true
# Override defaults to not use systemd cgroups.
conmon_cgroup = "pod"
cgroup_manager = "cgroupfs"
DONE
rc-update add crio default
service crio start
# Make sure OS packages and cilium use the same cni-bin dir
rm -rf /opt/cni/bin
cd /opt/cni
ln -s ../../usr/libexec/cni bin
```
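A quick sanity check that cri-o is running and the symlink points where we expect (exact output varies):

```
rc-service crio status   # should report "started"
ls -l /opt/cni/bin       # should point at ../../usr/libexec/cni
```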
### Optional: cri-tools
```
apk add cri-tools
cat > /etc/crictl.yaml << DONE
runtime-endpoint: unix:///run/crio/crio.sock
image-endpoint: unix:///run/crio/crio.sock
timeout: 10
debug: false
DONE
```
### Test if we can talk to cri-o
```
crictl info
```
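As a further smoke test, crictl can pull an image through the runtime (the image is just an example):

```
crictl pull docker.io/library/alpine:latest
crictl images
```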
---
## Install kubeadm and friends
```
apk add kubeadm
apk add kubectl
# Save yourself lots of typing
cd /usr/local/bin/
ln -s ../../bin/kubectl k
```
## Install kubelet
```
apk add kubelet
rc-update add kubelet default
# Start kubelet ourselves, as kubeadm cannot do that on Alpine
service kubelet start
```
---
## Bootstrap kubernetes cluster (only on the first control plane node)
```
kubeadm init phase preflight --config ./kubeadm-config.yaml
kubeadm config images pull --config ./kubeadm-config.yaml
kubeadm init --config ./kubeadm-config.yaml --skip-phases=addon/kube-proxy
# Untaint master to allow running workloads on master nodes (for POC)
kubectl taint nodes --all node-role.kubernetes.io/master-
```
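Once kubeadm finishes, a quick check that the control plane answers (the node will stay NotReady until cilium is installed below):

```
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes -o wide
kubectl get pods -A
```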
---
## Configure cluster (ideally from laptop/external machine)
```
# install helm (the version from alpine causes problems with helmfile)
cd /tmp
wget https://get.helm.sh/helm-v3.5.2-linux-amd64.tar.gz
tar -xvzf helm-v3.5.2-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/
chmod +x /usr/local/bin/helm
# install helm diff plugin
helm plugin install https://github.com/databus23/helm-diff
# install helmfile (usually on laptop/external node)
cd /tmp
wget https://github.com/roboll/helmfile/releases/download/v0.138.4/helmfile_linux_amd64
mv helmfile_linux_amd64 /usr/local/bin/helmfile
chmod +x /usr/local/bin/helmfile
# Setup KUBECONFIG when running on the master node.
# Configure ~/.kube/config when running from laptop.
export KUBECONFIG=/etc/kubernetes/admin.conf
# Install cilium using helmfile and local values file
cd cilium/
helmfile diff
helmfile sync
```
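To verify that cilium came up, something along these lines should work (the exec form assumes a kubectl recent enough to target a daemonset):

```
kubectl -n cilium get pods -o wide
# ask the agent itself for its view of things
kubectl -n cilium exec ds/cilium -- cilium status --brief
```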

v1/cilium/README.md (new file, 5 lines)

@@ -0,0 +1,5 @@
1. Make sure your environment points at the right cluster (see the sketch below)
2. `helmfile diff`
3. `helmfile sync`
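A minimal sketch of step 1, assuming the admin kubeconfig was copied off the control plane node (the local path is hypothetical):

```
export KUBECONFIG=$HOME/.kube/ungleich-admin.conf   # hypothetical local copy
kubectl config current-context                      # verify before changing anything
helmfile diff
helmfile sync
```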

v1/cilium/helmfile.yaml (new file, 12 lines)

@@ -0,0 +1,12 @@
repositories:
  - name: cilium
    url: https://helm.cilium.io

releases:
  - name: cilium
    namespace: cilium
    chart: cilium/cilium
    version: 1.9.0
    values:
      - ./values.yaml

v1/cilium/values.yaml (new file, 22 lines)

@@ -0,0 +1,22 @@
# https://github.com/cilium/cilium/blob/master/install/kubernetes/cilium/values.yaml
#
# https://docs.cilium.io/en/stable/gettingstarted/kubeproxy-free/
kubeProxyReplacement: strict
#k8sServiceHost: 2a0a:e5c0:10:bee:21b:fcff:feee:f4bc
k8sServiceHost: asus3.place10.ungleich.ch
k8sServicePort: 6443
ipv4:
  enabled: false
ipv6:
  enabled: true

tunnel: disabled

ipam:
  mode: kubernetes

endpointRoutes:
  enabled: true

nativeRoutingCIDR: 2a0a:e5c0:10:104::/64

v1/kubeadm-config.yaml (new file, 40 lines)

@@ -0,0 +1,40 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  kubeletExtraArgs:
    cgroup-driver: "cgroupfs"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  kubeletExtraArgs:
    cgroup-driver: "cgroupfs"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: ungleich
apiServer:
  certSANs:
    - "2a0a:e5c0:10:bee:21b:fcff:feee:f4bc"
    - "asus3.place10.ungleich.ch"
# TODO: controlPlaneEndpoint could/should be a VIP when running HA (e.g. keepalived on master nodes)
controlPlaneEndpoint: "[2a0a:e5c0:10:bee:21b:fcff:feee:f4bc]:6443"
networking:
  dnsDomain: cluster.local
  podSubnet: 2a0a:e5c0:10:104::/64
  serviceSubnet: 2a0a:e5c0:10:105::/108
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "cgroupfs"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
  # Prevent kube-proxy from starting as it would create iptables rules and what
  # not that conflict with kube-router/cilium. Still have to delete the daemonset
  # manually after `kubeadm upgrade`.
  kubeconfig: invalid-kubeconfig.conf
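With the JoinConfiguration above in place, further nodes join via the usual token flow; a sketch, assuming the new node has been prepared with the same OS and cri-o steps as the first one:

```
# on the first control plane node: print a fresh join command
kubeadm token create --print-join-command
# on the new node: run the printed command, pointing kubeadm at cri-o
kubeadm join [2a0a:e5c0:10:bee:21b:fcff:feee:f4bc]:6443 \
  --token <token> --discovery-token-ca-cert-hash sha256:<hash> \
  --cri-socket unix:///var/run/crio/crio.sock
```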