From 323a19652bc5539339e286a27717b5ea310b3a67 Mon Sep 17 00:00:00 2001
From: Nico Schottelius
Date: Thu, 5 Aug 2021 21:06:04 +0200
Subject: [PATCH] ++rook ++ change network for p6.k8s.ooo

---
 k8s/p6/kubeadm.yaml |   4 +-
 rook/README.md      |   3 +-
 rook/values.yaml    | 135 ++++++++++++++++++++++----------------------
 3 files changed, 71 insertions(+), 71 deletions(-)

diff --git a/k8s/p6/kubeadm.yaml b/k8s/p6/kubeadm.yaml
index 76099fd..b031909 100644
--- a/k8s/p6/kubeadm.yaml
+++ b/k8s/p6/kubeadm.yaml
@@ -4,8 +4,8 @@ apiVersion: kubeadm.k8s.io/v1beta2
 kubernetesVersion: v1.21.2
 networking:
   dnsDomain: p6.k8s.ooo
-  podSubnet: 2a0a:e5c0:2:13::/64
-  serviceSubnet: 2a0a:e5c0:2:14::/108
+  podSubnet: 2a0a:e5c0:12:1::/64
+  serviceSubnet: 2a0a:e5c0:12::/108
 controlPlaneEndpoint: "p6-api.k8s.ooo:6443"
 ---
 kind: KubeletConfiguration
diff --git a/rook/README.md b/rook/README.md
index 7277797..7d5c165 100644
--- a/rook/README.md
+++ b/rook/README.md
@@ -37,7 +37,8 @@ done
 helm repo add rook-release https://charts.rook.io/release
 helm repo update
 helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
-
+helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
+    --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/values.yaml
 ```

 ## Debugging / ceph toolbox
diff --git a/rook/values.yaml b/rook/values.yaml
index bed5485..050f947 100644
--- a/rook/values.yaml
+++ b/rook/values.yaml
@@ -96,25 +96,9 @@ cephClusterSpec:
     # port: 8443

   # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings
-  # network:
-  #   # enable host networking
-  #   provider: host
-  #   # EXPERIMENTAL: enable the Multus network provider
-  #   provider: multus
-  #   selectors:
-  #     # The selector keys are required to be `public` and `cluster`.
-  #     # Based on the configuration, the operator will do the following:
-  #     #   1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
-  #     #   2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
-  #     #
-  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
-  #     #
-  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
-  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
-  #   # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
-  #   ipFamily: "IPv6"
-  #   # Ceph daemons to listen on both IPv4 and Ipv6 networks
-  #   dualStack: false
+  network:
+    ipFamily: "IPv6"
+    dualStack: false

   # enable the crash collector for ceph daemon crash collection
   crashCollector:
@@ -308,37 +292,50 @@ ingress:
     # tls:

 cephBlockPools:
-  - name: ceph-blockpool
+  - name: ceph-blockpool-hdd
     # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
     spec:
       failureDomain: host
       replicated:
         size: 3
+      deviceClass: "hdd"
     storageClass:
       enabled: true
-      name: ceph-block
+      name: ceph-block-hdd
       isDefault: true
       reclaimPolicy: Delete
       allowVolumeExpansion: true
       # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
       parameters:
-        # (optional) mapOptions is a comma-separated list of map options.
-        # For krbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
-        # For nbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
-        # mapOptions: lock_on_read,queue_depth=1024
-
-        # (optional) unmapOptions is a comma-separated list of unmap options.
-        # For krbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd/#kernel-rbd-krbd-options
-        # For nbd options refer
-        # https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
-        # unmapOptions: force
-
-        # RBD image format. Defaults to "2".
         imageFormat: "2"
-        # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+        imageFeatures: layering
+        # The secrets contain Ceph admin credentials.
+        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
+        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
+        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
+        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
+        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
+        # Specify the filesystem type of the volume. If not specified, csi-provisioner
+        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
+        # in hyperconverged settings where the volume is mounted on the same node as the osds.
+        csi.storage.k8s.io/fstype: ext4
+  - name: ceph-blockpool-ssd
+    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
+    spec:
+      failureDomain: host
+      replicated:
+        size: 3
+      deviceClass: "ssd"
+    storageClass:
+      enabled: true
+      name: ceph-block-ssd
+      isDefault: false
+      reclaimPolicy: Delete
+      allowVolumeExpansion: true
+      # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
+      parameters:
+        imageFormat: "2"
         imageFeatures: layering
         # The secrets contain Ceph admin credentials.
         csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
@@ -353,16 +350,18 @@ cephBlockPools:
         csi.storage.k8s.io/fstype: ext4

 cephFileSystems:
-  - name: ceph-filesystem
+  - name: ceph-filesystem-ssd
     # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
     spec:
       metadataPool:
         replicated:
           size: 3
+        deviceClass: "ssd"
       dataPools:
         - failureDomain: host
           replicated:
             size: 3
+          deviceClass: "ssd"
       metadataServer:
         activeCount: 1
         activeStandby: true
@@ -384,33 +383,33 @@ cephFileSystems:
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4

-cephObjectStores:
-  - name: ceph-objectstore
-    # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
-    spec:
-      metadataPool:
-        failureDomain: host
-        replicated:
-          size: 3
-      dataPool:
-        failureDomain: host
-        erasureCoded:
-          dataChunks: 2
-          codingChunks: 1
-      preservePoolsOnDelete: true
-      gateway:
-        port: 80
-        # securePort: 443
-        # sslCertificateRef:
-        instances: 1
-      healthCheck:
-        bucket:
-          interval: 60s
-    storageClass:
-      enabled: true
-      name: ceph-bucket
-      reclaimPolicy: Delete
-      # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
-      parameters:
-        # note: objectStoreNamespace and objectStoreName are configured by the chart
-        region: us-east-1
+# cephObjectStores:
+#   - name: ceph-objectstore
+#     # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
+#     spec:
+#       metadataPool:
+#         failureDomain: host
+#         replicated:
+#           size: 3
+#       dataPool:
+#         failureDomain: host
+#         erasureCoded:
+#           dataChunks: 2
+#           codingChunks: 1
+#       preservePoolsOnDelete: true
+#       gateway:
+#         port: 80
+#         # securePort: 443
+#         # sslCertificateRef:
+#         instances: 1
+#       healthCheck:
+#         bucket:
+#           interval: 60s
+#     storageClass:
+#       enabled: true
+#       name: ceph-bucket
+#       reclaimPolicy: Delete
+#       # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
+#       parameters:
+#         # note: objectStoreNamespace and objectStoreName are configured by the chart
+#         region: us-east-1
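
After the patch is applied and both helm installs from rook/README.md have run, a quick way to confirm the cluster converges (a sketch; it assumes kubectl already points at the p6.k8s.ooo cluster and uses the rook-ceph namespace from the README):

```
# operator, mon, mgr and osd pods should all reach Running
kubectl -n rook-ceph get pods

# the CephCluster created by the rook-ceph-cluster chart; HEALTH_OK can take a few minutes
kubectl -n rook-ceph get cephcluster

# the storage classes defined in rook/values.yaml: ceph-block-hdd (default) and ceph-block-ssd
kubectl get storageclass
```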
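
Because the pools are now split by deviceClass, it is also worth checking that Ceph reports both classes; from the toolbox described under "Debugging / ceph toolbox" in rook/README.md (a sketch; the rook-ceph-tools deployment name is the usual toolbox default, not something this patch configures):

```
# should list both "hdd" and "ssd"
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd crush class ls

# per-pool usage, one blockpool per device class
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph df
```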
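
For reference, a PersistentVolumeClaim against one of the new device-class-specific storage classes might look like the following (a minimal sketch; claim name and size are illustrative):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc-ssd                # illustrative name
spec:
  accessModes:
    - ReadWriteOnce                 # RBD block volumes attach to a single node
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph-block-ssd  # omit to fall back to the default, ceph-block-hdd
```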