Merge branch 'master' of code.ungleich.ch:ungleich-public/ungleich-k8s

Nico Schottelius 2021-08-08 12:55:24 +02:00
commit 3792700e1c
6 changed files with 811 additions and 95 deletions

k8s/p6/calico-bgp.yaml (new file, 29 lines)

@@ -0,0 +1,29 @@
---
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  logSeverityScreen: Info
  nodeToNodeMeshEnabled: true
  asNumber: 65534
  serviceClusterIPs:
    - cidr: 2a0a:e5c0:2:14::/108
  serviceExternalIPs:
    - cidr: 2a0a:e5c0:2:14::/108
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: apu-router1-place6
spec:
  peerIP: 2a0a:e5c0:2:1::43
  asNumber: 207996
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: apu-router2-place6
spec:
  peerIP: 2a0a:e5c0:2:1::44
  asNumber: 207996
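The BGPConfiguration announces the service CIDR alongside the node-to-node mesh, and the two BGPPeer resources point every node at the place6 routers (AS 207996). A minimal sketch of applying and verifying this, assuming calicoctl is installed and configured against this cluster:

```
# Resources in the projectcalico.org/v3 API group are applied with
# calicoctl (plain kubectl only serves them if the Calico API server is installed).
calicoctl apply -f k8s/p6/calico-bgp.yaml

# The peers should eventually report state "Established".
calicoctl node status
```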

k8s/p6/kubeadm.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
# kubeadm-config.yaml
kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: v1.21.2
networking:
  dnsDomain: p6.k8s.ooo
  podSubnet: 2a0a:e5c0:12:1::/64
  serviceSubnet: 2a0a:e5c0:12::/108
controlPlaneEndpoint: "p6-api.k8s.ooo:6443"
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: cgroupfs
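This config bootstraps an IPv6-only cluster: both the pod and service subnets are IPv6 prefixes, and the control plane is reached via p6-api.k8s.ooo. A sketch of using it, assuming kubeadm 1.21 on the first control-plane node and DNS for the endpoint already in place:

```
# Initialize the first control-plane node; --upload-certs allows
# additional control-plane nodes to join later.
kubeadm init --config k8s/p6/kubeadm.yaml --upload-certs
```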


@@ -31,6 +31,15 @@ for yaml in crds common operator cluster storageclass-cephfs storageclass-rbd to
done
```
## v3 via helm
```
helm repo add rook-release https://charts.rook.io/release
helm repo update
helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \
--set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f rook/values.yaml
```
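Once both releases are deployed, something like the following should confirm that the operator pods are running and the CephCluster converges (resource names depend on the chart defaults):

```
kubectl -n rook-ceph get pods
kubectl -n rook-ceph get cephcluster
```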
## Debugging / ceph toolbox

rook/values.yaml (new file, 415 lines)

@@ -0,0 +1,415 @@
# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Namespace of the main rook operator
operatorNamespace: rook-ceph

# The metadata.name of the CephCluster CR. The default name is the same as the namespace.
# clusterName: rook-ceph

# Ability to override ceph.conf
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  enabled: true
  image: rook/ceph:VERSION
  tolerations: []
  affinity: {}

monitoring:
  # requires Prometheus to be pre-installed
  # enabling will also create RBAC rules to allow the Operator to create ServiceMonitors
  enabled: false
  rulesNamespaceOverride:

# The imagePullSecrets option allows pulling docker images from a private docker registry. It is passed to all service accounts.
# imagePullSecrets:
#   - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# More information can be found at [Ceph Cluster CRD](/Documentation/ceph-cluster-crd.md)
cephClusterSpec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v14 is nautilus, v15 is octopus, and v16 is pacific.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v16 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v15.2.11-20200419
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: quay.io/ceph/ceph:v16.2.5
    # Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
    # Future versions such as `quincy` would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
  # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  mon:
    # Set the number of mons to be started. Must be an odd number, and is generally recommended to be 3.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 1
    modules:
      # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
      # are already enabled by other settings in the cluster CR.
      - name: pg_autoscaler
        enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/ceph-cluster-crd.md#network-configuration-settings
  network:
    ipFamily: "IPv6"
    dualStack: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable log collector, daemons will log on files and rotate
  # logCollector:
  #   enabled: true
  #   periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.

  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-installation is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration overwrites the disk N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #               - key: role
  #                 operator: In
  #                 values:
  #                   - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #       - key: storage-node
  #         operator: Exists
  #   # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  #   # collocation on the same node. This is a required rule when host network is used
  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  #   # preferred rule with weight: 50.
  #   osd:
  #   mgr:
  #   cleanup:

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
  #   # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:

  # resources:
  #   # The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
  #   mgr:
  #     limits:
  #       cpu: "500m"
  #       memory: "1024Mi"
  #     requests:
  #       cpu: "500m"
  #       memory: "1024Mi"
  #   # The above example requests/limits can also be added to the other components
  #   mon:
  #   osd:
  #   prepareosd:
  #   mgr-sidecar:
  #   crashcollector:
  #   logcollector:
  #   cleanup:

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  # priorityClassNames:
  #   all: rook-ceph-default-priority-class
  #   mon: rook-ceph-mon-priority-class
  #   osd: rook-ceph-osd-priority-class
  #   mgr: rook-ceph-mgr-priority-class

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # A duration in minutes that the operator will wait for the placement groups to become healthy (active+clean) after a drain was completed and OSDs came back up.
    # The operator will continue with the next drain if the timeout is exceeded. It only works if `managePodBudgets` is `true`.
    # No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
    pgHealthCheckTimeout: 0
    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
    # Only available on OpenShift.
    manageMachineDisruptionBudgets: false
    # Namespace in which to watch for the MachineDisruptionBudgets.
    machineDisruptionBudgetNamespace: openshift-machine-api

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change pod liveness probe, it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false

ingress:
  dashboard: {}
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  #   external-dns.alpha.kubernetes.io/hostname: example.com
  #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
  # host:
  #   name: example.com
  #   path: "/ceph-dashboard(/|$)(.*)"
  # tls:

cephBlockPools:
  - name: ceph-blockpool-hdd
    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: "hdd"
    storageClass:
      enabled: true
      name: ceph-block-hdd
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
      parameters:
        imageFormat: "2"
        imageFeatures: layering
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
  - name: ceph-blockpool-ssd
    # see https://github.com/rook/rook/blob/master/Documentation/ceph-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      deviceClass: "ssd"
    storageClass:
      enabled: true
      name: ceph-block-ssd
      isDefault: false
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-block.md#provision-storage for available configuration
      parameters:
        imageFormat: "2"
        imageFeatures: layering
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

cephFileSystems:
  - name: ceph-filesystem-ssd
    # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
        deviceClass: "ssd"
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          deviceClass: "ssd"
      metadataServer:
        activeCount: 1
        activeStandby: true
    storageClass:
      enabled: true
      name: ceph-filesystem
      reclaimPolicy: Delete
      # see https://github.com/rook/rook/blob/master/Documentation/ceph-filesystem.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# cephObjectStores:
#   - name: ceph-objectstore
#     # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-store-crd.md#object-store-settings for available configuration
#     spec:
#       metadataPool:
#         failureDomain: host
#         replicated:
#           size: 3
#       dataPool:
#         failureDomain: host
#         erasureCoded:
#           dataChunks: 2
#           codingChunks: 1
#       preservePoolsOnDelete: true
#       gateway:
#         port: 80
#         # securePort: 443
#         # sslCertificateRef:
#         instances: 1
#       healthCheck:
#         bucket:
#           interval: 60s
#     storageClass:
#       enabled: true
#       name: ceph-bucket
#       reclaimPolicy: Delete
#       # see https://github.com/rook/rook/blob/master/Documentation/ceph-object-bucket-claim.md#storageclass for available configuration
#       parameters:
#         # note: objectStoreNamespace and objectStoreName are configured by the chart
#         region: us-east-1
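Since the toolbox is enabled in these values, cluster health can be inspected from inside it once the deployment is up; a sketch assuming the chart's default deployment name rook-ceph-tools:

```
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd tree
```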


@@ -0,0 +1,48 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pvcrwotest
spec:
  selector:
    matchLabels:
      app: pvcrwotest
  replicas: 1
  template:
    metadata:
      labels:
        app: pvcrwotest
    spec:
      containers:
        - name: nginx
          image: nginx:1.21-alpine
          ports:
            - containerPort: 443
          volumeMounts:
            - name: data
              mountPath: "/var/www/html"
        - name: busybox
          image: busybox
          args:
            - sleep
            - "100000000"
          volumeMounts:
            - name: data
              mountPath: "/data"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: pvcrwotest-data
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvcrwotest-data
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-ceph-block
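A quick way to exercise the RWO volume once applied; note the PVC requests storageClassName rook-ceph-block, which must exist in the cluster (the values.yaml above creates ceph-block-hdd and ceph-block-ssd, so the name may need adjusting). The manifest filename below is an assumption:

```
kubectl apply -f pvcrwotest.yaml   # filename is an assumption
kubectl get pvc pvcrwotest-data    # should reach STATUS "Bound"
# Write and read back through the busybox container's mount.
kubectl exec deploy/pvcrwotest -c busybox -- sh -c 'echo ok > /data/t && cat /data/t'
```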


@@ -32,9 +32,9 @@ data:
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "false",
"assign_ipv6": "true"
"assign_ipv6": true",
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
@@ -243,6 +243,11 @@ spec:
Peers node to use the "next hop keep;" instead of "next hop self;" (default)
in the specific branch of the Node on "bird.cfg".
type: boolean
maxRestartTime:
description: Time to allow for software restart. When specified, this
is configured as the graceful restart timeout. When not specified,
the BIRD default of 120s is used.
type: string
node:
description: The node name identifying the Calico node instance that
is targeted by this peer. If this is not set, and no nodeSelector
@@ -513,13 +518,6 @@ spec:
description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
[Default: false]'
type: boolean
bpfExtToServiceConnmark:
description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
mark that is set on connections from an external client to a local
service. This mark allows us to control how packets of that connection
are routed within the host and how routing is interpreted by RPF
check. [Default: 0]'
type: integer
bpfExternalServiceMode:
description: 'BPFExternalServiceMode in BPF mode, controls how connections
from outside the cluster to services (node ports and cluster IPs)
@@ -530,6 +528,14 @@ spec:
node appears to use the IP of the ingress node; this requires a
permissive L2 network. [Default: Tunnel]'
type: string
bpfExtToServiceConnmark:
description: 'BPFExtToServiceConnmark in BPF mode, controls a
32bit mark that is set on connections from an external client to
a local service. This mark allows us to control how packets of
that connection are routed within the host and how routing is
interpreted by RPF check. [Default: 0]'
type: integer
bpfKubeProxyEndpointSlicesEnabled:
description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
whether Felix's embedded kube-proxy accepts EndpointSlices or not.
@@ -1063,16 +1069,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -1160,6 +1167,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
@@ -1268,16 +1295,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -1365,6 +1393,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
@@ -1394,16 +1442,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -1491,6 +1540,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
@@ -1599,16 +1668,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -1696,6 +1766,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
@@ -2280,6 +2370,11 @@ spec:
host endpoints for every node. [Default: Disabled]'
type: string
type: object
leakGracePeriod:
description: 'LeakGracePeriod is the period used by the controller
to determine if an IP address has been leaked. Set to 0
to disable IP garbage collection. [Default: 15m]'
type: string
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform reconciliation
with the Calico datastore. [Default: 5m]'
@@ -2380,6 +2475,12 @@ spec:
of host endpoints for every node. [Default: Disabled]'
type: string
type: object
leakGracePeriod:
description: 'LeakGracePeriod is the period used by the
controller to determine if an IP address has been leaked.
Set to 0 to disable IP garbage collection. [Default:
15m]'
type: string
reconcilerPeriod:
description: 'ReconcilerPeriod is the period to perform
reconciliation with the Calico datastore. [Default:
@@ -2508,16 +2609,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -2605,6 +2707,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
@@ -2713,16 +2835,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -2810,6 +2933,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
@@ -2839,16 +2982,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -2936,6 +3080,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
@@ -3044,16 +3208,17 @@ spec:
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and Selector are defined on the same rule, then only workload
endpoints that are matched by both selectors will be selected
by the rule. \n For NetworkPolicy, an empty NamespaceSelector
implies that the Selector is limited to selecting only
workload endpoints in the same namespace as the NetworkPolicy.
\n For NetworkPolicy, `global()` NamespaceSelector implies
that the Selector is limited to selecting only GlobalNetworkSet
or HostEndpoint. \n For GlobalNetworkPolicy, an empty
NamespaceSelector implies the Selector applies to workload
endpoints across all namespaces."
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
@@ -3141,6 +3306,26 @@ spec:
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Ports, NotPorts, Nets, NotNets or ServiceAccounts. \n
Only valid on egress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
@@ -3274,12 +3459,14 @@ rules:
- watch
- list
- get
# Pods are queried to check for existence.
# Pods are watched to check for existence as part of IPAM controller.
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
@@ -3361,6 +3548,14 @@ rules:
- namespaces
verbs:
- get
# EndpointSlices are used for Service-based network policy rule
# enforcement.
- apiGroups: ["discovery.k8s.io"]
resources:
- endpointslices
verbs:
- watch
- list
- apiGroups: [""]
resources:
- endpoints
@@ -3546,7 +3741,7 @@ spec:
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: docker.io/calico/cni:v3.19.0
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
- configMapRef:
@@ -3573,7 +3768,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: docker.io/calico/cni:v3.19.0
image: docker.io/calico/cni:v3.20.0
command: ["/opt/cni/bin/install"]
envFrom:
- configMapRef:
@@ -3614,7 +3809,7 @@ spec:
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: docker.io/calico/pod2daemon-flexvol:v3.19.0
image: docker.io/calico/pod2daemon-flexvol:v3.20.0
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
@@ -3625,7 +3820,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: docker.io/calico/node:v3.19.0
image: docker.io/calico/node:v3.20.0
envFrom:
- configMapRef:
# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
@@ -3713,6 +3908,7 @@ spec:
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command:
@@ -3720,7 +3916,12 @@ spec:
- -felix-ready
- -bird-ready
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
# For maintaining CNI plugin API credentials.
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
@@ -3836,7 +4037,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: docker.io/calico/kube-controllers:v3.19.0
image: docker.io/calico/kube-controllers:v3.20.0
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
@@ -3851,6 +4052,7 @@ spec:
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
exec:
command: