Skip to content

Commit 792aa2c

Browse files
committed
Update to rook v1.9.10, Rename cephfs storageclass
- Rename cephfs storageclass: ceph-file-sc -> csi-cephfs-sc
- Update inventory to rook v1.9.10
1 parent 99ee955 commit 792aa2c

File tree

23 files changed

+4829
-2287
lines changed

23 files changed

+4829
-2287
lines changed

docs/examples/file-nginx.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ spec:
88
resources:
99
requests:
1010
storage: 100Mi
11-
storageClassName: ceph-file-sc
11+
storageClassName: csi-cephfs-sc
1212
---
1313
apiVersion: apps/v1
1414
kind: Deployment

docs/file.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ spec:
102102
apiVersion: storage.k8s.io/v1
103103
kind: StorageClass
104104
metadata:
105-
name: ceph-file-sc
105+
name: csi-cephfs-sc
106106
provisioner: rook-ceph.cephfs.csi.ceph.com
107107
parameters:
108108
# clusterID is the namespace where operator is deployed.

hack/inventory/production-sample/rook/block_pool.yaml

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,3 @@ spec:
6262
# quotas:
6363
# maxSize: "10Gi" # valid suffixes include k, M, G, T, P, E, Ki, Mi, Gi, Ti, Pi, Ei
6464
# maxObjects: 1000000000 # 1 billion objects
65-
# A key/value list of annotations
66-
annotations:
67-
# key: value

hack/inventory/production-sample/rook/block_sc.yaml

Lines changed: 9 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,3 @@
1-
apiVersion: ceph.rook.io/v1
2-
kind: CephBlockPool
3-
metadata:
4-
name: replicapool
5-
namespace: rook-ceph
6-
spec:
7-
failureDomain: host
8-
replicated:
9-
size: 3
10-
# Disallow setting pool with replica 1, this could lead to data loss without recovery.
11-
# Make sure you're *ABSOLUTELY CERTAIN* that is what you want
12-
requireSafeReplicaSize: true
13-
# gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
14-
# for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
15-
#targetSizeRatio: .5
16-
---
171
apiVersion: storage.k8s.io/v1
182
kind: StorageClass
193
metadata:
@@ -47,6 +31,15 @@ parameters:
4731
# https://docs.ceph.com/docs/master/man/8/rbd-nbd/#options
4832
# unmapOptions: force
4933

34+
# (optional) Set it to true to encrypt each volume with encryption keys
35+
# from a key management system (KMS)
36+
# encrypted: "true"
37+
38+
# (optional) Use external key management system (KMS) for encryption key by
39+
# specifying a unique ID matching a KMS ConfigMap. The ID is only used for
40+
# correlation to configmap entry.
41+
# encryptionKMSID: <kms-config-id>
42+
5043
# RBD image format. Defaults to "2".
5144
imageFormat: "2"
5245

hack/inventory/production-sample/rook/cluster.yaml

Lines changed: 49 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -16,13 +16,13 @@ metadata:
1616
spec:
1717
cephVersion:
1818
# The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
19-
# v14 is nautilus, v15 is octopus, and v16 is pacific.
19+
# v15 is octopus, and v16 is pacific.
2020
# RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
2121
# versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
22-
# If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.6-20210918
22+
# If you want to be more precise, you can always use a timestamp tag such quay.io/ceph/ceph:v16.2.10-20220721
2323
# This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
24-
image: quay.io/ceph/ceph:v15.2.8
25-
# Whether to allow unsupported versions of Ceph. Currently `nautilus`, `octopus`, and `pacific` are supported.
24+
image: quay.io/ceph/ceph:v16.2.10
25+
# Whether to allow unsupported versions of Ceph. Currently `octopus` and `pacific` are supported.
2626
# Future versions such as `pacific` would require this to be set to `true`.
2727
# Do not set to true in production.
2828
allowUnsupported: false
@@ -33,13 +33,13 @@ spec:
3333
# Whether or not upgrade should continue even if a check fails
3434
# This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
3535
# Use at your OWN risk
36-
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
36+
# To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/latest/ceph-upgrade.html#ceph-version-upgrades
3737
skipUpgradeChecks: false
3838
# Whether or not continue if PGs are not clean during an upgrade
3939
continueUpgradeAfterChecksEvenIfNotHealthy: false
4040
# WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
4141
# If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
42-
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then opertor would
42+
# if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would
4343
# continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
4444
# The default wait timeout is 10 minutes.
4545
waitTimeoutForHealthyOSDInMinutes: 10
@@ -54,7 +54,8 @@ spec:
5454
# When higher availability of the mgr is needed, increase the count to 2.
5555
# In that case, one mgr will be active and one in standby. When Ceph updates which
5656
# mgr is active, Rook will update the mgr services to match the active mgr.
57-
count: 1
57+
count: 2
58+
allowMultiplePerNode: false
5859
modules:
5960
# Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
6061
# are already enabled by other settings in the cluster CR.
@@ -73,13 +74,20 @@ spec:
7374
monitoring:
7475
# requires Prometheus to be pre-installed
7576
enabled: false
76-
# namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
77-
# Recommended:
78-
# If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
79-
# If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
80-
# deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
81-
rulesNamespace: rook-ceph
8277
network:
78+
connections:
79+
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
80+
# The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted.
81+
# When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
82+
# IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
83+
# you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
84+
# The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
85+
encryption:
86+
enabled: false
87+
# Whether to compress the data in transit across the wire. The default is false.
88+
# Requires Ceph Quincy (v17) or newer. Also see the kernel requirements above for encryption.
89+
compression:
90+
enabled: false
8391
# enable host networking
8492
#provider: host
8593
# enable the Multus network provider
@@ -107,8 +115,9 @@ spec:
107115
# enable log collector, daemons will log on files and rotate
108116
# logCollector:
109117
# enabled: true
110-
# periodicity: 24h # SUFFIX may be 'h' for hours or 'd' for days.
111-
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
118+
# periodicity: daily # one of: hourly, daily, weekly, monthly
119+
# maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
120+
# automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
112121
cleanupPolicy:
113122
# Since cluster cleanup is destructive to data, confirmation is required.
114123
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
@@ -158,6 +167,7 @@ spec:
158167
# or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
159168
# preferred rule with weight: 50.
160169
# osd:
170+
# prepareosd:
161171
# mgr:
162172
# cleanup:
163173
annotations:
@@ -166,6 +176,10 @@ spec:
166176
# osd:
167177
# cleanup:
168178
# prepareosd:
179+
# clusterMetadata annotations will be applied to only `rook-ceph-mon-endpoints` configmap and the `rook-ceph-mon` and `rook-ceph-admin-keyring` secrets.
180+
# And clusterMetadata annotations will not be merged with `all` annotations.
181+
# clusterMetadata:
182+
# kubed.appscode.com/sync: "true"
169183
# If no mgr annotations are set, prometheus scrape annotations will be set by default.
170184
# mgr:
171185
labels:
@@ -178,22 +192,23 @@ spec:
178192
# monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
179193
# These labels can be passed as LabelSelector to Prometheus
180194
# monitoring:
195+
# crashcollector:
181196
resources:
182197
# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
183198
# mgr:
184199
# limits:
185-
# cpu: "1"
200+
# cpu: "500m"
186201
# memory: "1024Mi"
187202
# requests:
188-
# cpu: "1"
203+
# cpu: "500m"
189204
# memory: "1024Mi"
190205
# The above example requests/limits can also be added to the other components
191206
# mon:
192207
# limits:
193-
# cpu: "2"
208+
# cpu: "1"
194209
# memory: "2048Mi"
195210
# requests:
196-
# cpu: "2"
211+
# cpu: "1"
197212
# memory: "2048Mi"
198213
# osd:
199214
# limits:
@@ -214,10 +229,11 @@ spec:
214229
# The option to automatically remove OSDs that are out and are safe to destroy.
215230
removeOSDsIfOutAndSafeToRemove: false
216231
priorityClassNames:
217-
all: rook-ceph-default-priority-class
218-
# mon: rook-ceph-mon-priority-class
219-
# osd: rook-ceph-osd-priority-class
220-
# mgr: rook-ceph-mgr-priority-class
232+
#all: rook-ceph-default-priority-class
233+
mon: system-node-critical
234+
osd: system-node-critical
235+
mgr: system-cluster-critical
236+
#crashcollector: rook-ceph-crashcollector-priority-class
221237
storage: # cluster level storage configuration and selection
222238
useAllNodes: false
223239
useAllDevices: false
@@ -249,7 +265,7 @@ spec:
249265
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
250266
# via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
251267
# block eviction of OSDs by default and unblock them safely when drains are detected.
252-
managePodBudgets: false
268+
managePodBudgets: true
253269
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
254270
# default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
255271
osdMaintenanceTimeout: 30
@@ -276,11 +292,19 @@ spec:
276292
status:
277293
disabled: false
278294
interval: 60s
279-
# Change pod liveness probe, it works for all mon,mgr,osd daemons
295+
# Change pod liveness probe timing or threshold values. Works for all mon,mgr,osd daemons.
280296
livenessProbe:
281297
mon:
282298
disabled: false
283299
mgr:
284300
disabled: false
285301
osd:
286302
disabled: false
303+
# Change pod startup probe timing or threshold values. Works for all mon,mgr,osd daemons.
304+
startupProbe:
305+
mon:
306+
disabled: false
307+
mgr:
308+
disabled: false
309+
osd:
310+
disabled: false

0 commit comments

Comments (0)