apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  kubeletExtraArgs:
    cgroup-driver: "cgroupfs"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
nodeRegistration:
  criSocket: "unix:///var/run/crio/crio.sock"
  kubeletExtraArgs:
    cgroup-driver: "cgroupfs"
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
clusterName: ungleich
apiServer:
  certSANs:
    - "2a0a:e5c0:10:bee:21b:fcff:feee:f4bc"
    - "asus3.place10.ungleich.ch"
# TODO: controlPlaneEndpoint could/should be a VIP when running HA
# (e.g. keepalived on the master nodes).
controlPlaneEndpoint: "[2a0a:e5c0:10:bee:21b:fcff:feee:f4bc]:6443"
networking:
  dnsDomain: cluster.local
  podSubnet: 2a0a:e5c0:10:104::/64
  serviceSubnet: 2a0a:e5c0:10:105::/108
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: "cgroupfs"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
  # Prevent kube-proxy from starting: it would create iptables rules (and
  # other state) that conflict with kube-router/cilium. The daemonset still
  # has to be deleted manually after `kubeadm upgrade`.
  kubeconfig: invalid-kubeconfig.conf
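# A minimal usage sketch: feed this file to kubeadm when bootstrapping the
# first control plane node. The filename kubeadm-config.yaml is an assumption;
# pass whatever path this file is actually saved under.
#   kubeadm init --config kubeadm-config.yaml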
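# Manual cleanup for the kube-proxy daemonset that `kubeadm upgrade` brings
# back (a sketch, assuming the default name and namespace of the addon):
#   kubectl -n kube-system delete daemonset kube-proxy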