Add helm values for rook

Nico Schottelius 2021-08-08 16:23:20 +02:00
parent 4fc2908e09
commit 8463462dcc
1 changed file with 2 additions and 15 deletions
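The values below target the rook-ceph-cluster Helm chart (the file sets operatorNamespace, toolbox and cephClusterSpec, which belong to that chart). A minimal sketch of applying them, assuming the file is saved as rook-ceph-cluster-values.yaml; the release name and file name are not shown in this commit and are only illustrative:

    # add the upstream chart repository and deploy/upgrade the cluster chart with these values
    helm repo add rook-release https://charts.rook.io/release
    helm upgrade --install rook-ceph-cluster rook-release/rook-ceph-cluster \
        --namespace rook-ceph -f rook-ceph-cluster-values.yaml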


@@ -18,7 +18,7 @@ operatorNamespace: rook-ceph
# Installs a debugging toolbox deployment
toolbox:
enabled: true
image: rook/ceph:VERSION
image: rook/ceph:master
tolerations: []
affinity: {}
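With the toolbox enabled above, cluster state can be inspected by exec-ing into the toolbox deployment; a sketch, assuming the chart's default deployment name rook-ceph-tools:

    # run a one-off health check from the debugging toolbox
    kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status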
@@ -36,21 +36,13 @@ monitoring:
# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md)
cephClusterSpec:
cephVersion:
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
# v14 is nautilus, v15 is octopus, and v16 is pacific.
# RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v15.2.11-20200419
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities.
# See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
image: quay.io/ceph/ceph:v16.2.5
# Whether to allow unsupported versions of Ceph. Currently `nautilus` and `octopus` are supported.
# Future versions such as `pacific` would require this to be set to `true`.
# Do not set to true in production.
allowUnsupported: false
# The path on the host where configuration files will be persisted. Must be specified.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
# Whether or not upgrade should continue even if a check fails
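As the dataDirHostPath comment above warns, reinstalling the cluster requires that directory to be removed from every host first; a sketch, assuming the default /var/lib/rook path kept in this file:

    # run on each node that hosted Ceph daemons before re-deploying the cluster
    rm -rf /var/lib/rook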
@@ -252,11 +244,6 @@ cephClusterSpec:
# The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
# No value or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
pgHealthCheckTimeout: 0
# If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
# Only available on OpenShift.
manageMachineDisruptionBudgets: false
# Namespace in which to watch for the MachineDisruptionBudgets.
machineDisruptionBudgetNamespace: openshift-machine-api
# Configure the healthcheck and liveness probes for ceph pods.
# Valid values for daemons are 'mon', 'osd', 'status'