Skip to content

Commit 2d507bf

Browse files
committed
Update to rook v1.12.10, ceph v17.2.6
- Update to rook v1.12.10 - Update to ceph v17.2.6 - Add to example volume yaml - support Ceph HEALTH_WARN status - support k8s v1.29
1 parent 792aa2c commit 2d507bf

File tree

21 files changed

+10365
-4335
lines changed

21 files changed

+10365
-4335
lines changed

hack/example/fs-pod.yaml

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
---
2+
apiVersion: apps/v1
3+
kind: Deployment
4+
metadata:
5+
name: fs-deployment-nginx
6+
namespace: default
7+
spec:
8+
replicas: 2
9+
selector:
10+
matchLabels:
11+
app: fs-pod-nginx
12+
template:
13+
metadata:
14+
labels:
15+
app: fs-pod-nginx
16+
spec:
17+
affinity:
18+
podAntiAffinity:
19+
requiredDuringSchedulingIgnoredDuringExecution:
20+
- labelSelector:
21+
matchExpressions:
22+
- key: app
23+
operator: In
24+
values:
25+
- fs-pod-nginx
26+
topologyKey: "kubernetes.io/hostname"
27+
containers:
28+
- name: web-server
29+
image: nginx
30+
volumeMounts:
31+
- name: mypvc
32+
mountPath: /var/lib/www/html
33+
volumes:
34+
- name: mypvc
35+
persistentVolumeClaim:
36+
claimName: cephfs-pvc
37+
readOnly: false

hack/example/fs-pvc.yaml

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
---
2+
apiVersion: v1
3+
kind: PersistentVolumeClaim
4+
metadata:
5+
name: cephfs-pvc
6+
spec:
7+
accessModes:
8+
- ReadWriteMany
9+
resources:
10+
requests:
11+
storage: 1Gi
12+
storageClassName: csi-cephfs-sc

hack/example/fs-snapshot.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
---
2+
apiVersion: snapshot.storage.k8s.io/v1
3+
kind: VolumeSnapshot
4+
metadata:
5+
name: cephfs-pvc-snapshot
6+
spec:
7+
volumeSnapshotClassName: ceph-file-snapclass
8+
source:
9+
persistentVolumeClaimName: cephfs-pvc

hack/example/rbd-pod.yaml

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
---
2+
apiVersion: v1
3+
kind: Pod
4+
metadata:
5+
name: csirbd-demo-pod
6+
spec:
7+
containers:
8+
- name: web-server
9+
image: nginx
10+
volumeMounts:
11+
- name: mypvc
12+
mountPath: /var/lib/www/html
13+
volumes:
14+
- name: mypvc
15+
persistentVolumeClaim:
16+
claimName: rbd-pvc
17+
readOnly: false

hack/example/rbd-pvc.yaml

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
---
2+
apiVersion: v1
3+
kind: PersistentVolumeClaim
4+
metadata:
5+
name: rbd-pvc
6+
spec:
7+
accessModes:
8+
- ReadWriteOnce
9+
resources:
10+
requests:
11+
storage: 1Gi
12+
storageClassName: ceph-block-sc

hack/example/rbd-snapshot.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
---
2+
apiVersion: snapshot.storage.k8s.io/v1
3+
kind: VolumeSnapshot
4+
metadata:
5+
name: rbd-pvc-snapshot
6+
spec:
7+
volumeSnapshotClassName: ceph-block-snapclass
8+
source:
9+
persistentVolumeClaimName: rbd-pvc

hack/inventory/production-sample/rook/cluster.yaml

Lines changed: 40 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -16,14 +16,14 @@ metadata:
1616
spec:
1717
cephVersion:
1818
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
19-
# v15 is octopus, and v16 is pacific.
20-
# RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
19+
# v16 is Pacific, and v17 is Quincy.
20+
# RECOMMENDATION: In production, use a specific version tag instead of the general v17 flag, which pulls the latest release and could result in different
2121
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
22-
# If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.10-20220721
22+
# If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v17.2.6-20230410
2323
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
24-
image: quay.io/ceph/ceph:v16.2.10
25-
# Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
26-
# Future versions such as `pacific` would require this to be set to `true`.
24+
image: quay.io/ceph/ceph:v17.2.6
25+
# Whether to allow unsupported versions of Ceph. Currently `pacific`, `quincy`, and `reef` are supported.
26+
# Future versions such as `squid` (v19) would require this to be set to `true`.
2727
# Do not set to true in production.
2828
allowUnsupported: false
2929
# The path on the host where configuration files will be persisted. Must be specified.
@@ -70,10 +70,17 @@ spec:
7070
# port: 8443
7171
# serve the dashboard using SSL
7272
ssl: true
73+
# The url of the Prometheus instance
74+
# prometheusEndpoint: <protocol>://<prometheus-host>:<port>
75+
# Whether SSL should be verified if the Prometheus server is using https
76+
# prometheusEndpointSSLVerify: false
7377
# enable prometheus alerting for cluster
7478
monitoring:
7579
# requires Prometheus to be pre-installed
7680
enabled: false
81+
# Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled.
82+
# If true, the prometheus mgr module and Ceph exporter are both disabled. Default is false.
83+
metricsDisabled: false
7784
network:
7885
connections:
7986
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
@@ -88,35 +95,45 @@ spec:
8895
# Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
8996
compression:
9097
enabled: false
98+
# Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
99+
# and clients will be required to connect to the Ceph cluster with the v2 port (3300).
100+
# Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
101+
requireMsgr2: false
91102
# enable host networking
92103
#provider: host
93104
# enable the Multus network provider
94105
#provider: multus
95106
#selectors:
96-
# The selector keys are required to be `public` and `cluster`.
97-
# Based on the configuration, the operator will do the following:
98-
# 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
99-
# 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
100-
#
101-
# In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
102-
#
103-
#public: public-conf --> NetworkAttachmentDefinition object name in Multus
104-
#cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
107+
# The selector keys are required to be `public` and `cluster`.
108+
# Based on the configuration, the operator will do the following:
109+
# 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface
110+
# 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network'
111+
#
112+
# In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
113+
#
114+
# public: public-conf --> NetworkAttachmentDefinition object name in Multus
115+
# cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
105116
# Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
106117
#ipFamily: "IPv6"
107118
# Ceph daemons to listen on both IPv4 and IPv6 networks
108119
#dualStack: false
120+
# Enable multiClusterService to export the mon and OSD services to peer cluster.
121+
# This is useful to support RBD mirroring between two clusters having overlapping CIDRs.
122+
# Ensure that peer clusters are connected using an MCS API compatible application, like Globalnet Submariner.
123+
#multiClusterService:
124+
# enabled: false
125+
109126
# enable the crash collector for ceph daemon crash collection
110127
crashCollector:
111128
disable: false
112129
# Uncomment daysToRetain to prune ceph crash entries older than the
113130
# specified number of days.
114131
#daysToRetain: 30
115132
# enable log collector, daemons will log on files and rotate
116-
# logCollector:
117-
# enabled: true
118-
# periodicity: daily # one of: hourly, daily, weekly, monthly
119-
# maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
133+
logCollector:
134+
enabled: false
135+
periodicity: daily # one of: hourly, daily, weekly, monthly
136+
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
120137
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
121138
cleanupPolicy:
122139
# Since cluster cleanup is destructive to data, confirmation is required.
@@ -242,11 +259,10 @@ spec:
242259
# crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
243260
# metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
244261
# databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
245-
# journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
246262
# osdsPerDevice: "1" # this value can be overridden at the node or device level
247263
# encryptedDevice: "true" # the default value for this option is "false"
248-
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
249-
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
264+
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
265+
# nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
250266
# nodes:
251267
# - name: "172.17.4.201"
252268
# devices: # specific devices to use for storage can be specified for each node
@@ -260,6 +276,8 @@ spec:
260276
# deviceFilter: "^sd."
261277
# when onlyApplyOSDPlacement is false, will merge both placement.All() and placement.osd
262278
onlyApplyOSDPlacement: false
279+
# Time for which an OSD pod will sleep before restarting, if it stopped due to flapping
280+
# flappingRestartIntervalHours: 24
263281
# The section for configuring management of daemon disruptions during upgrade or fencing.
264282
disruptionManagement:
265283
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
@@ -273,11 +291,6 @@ spec:
273291
# Operator will continue with the next drain if the timeout exceeds. It only works if `managePodBudgets` is `true`.
274292
# No values or 0 means that the operator will wait until the placement groups are healthy before unblocking the next drain.
275293
pgHealthCheckTimeout: 0
276-
# If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
277-
# Only available on OpenShift.
278-
manageMachineDisruptionBudgets: false
279-
# Namespace in which to watch for the MachineDisruptionBudgets.
280-
machineDisruptionBudgetNamespace: openshift-machine-api
281294

282295
# healthChecks
283296
# Valid values for daemons are 'mon', 'osd', 'status'

0 commit comments

Comments (0)