diff --git a/cluster/addons/volume-group-snapshots/csi-hostpath-plugin.yaml b/cluster/addons/volume-group-snapshots/csi-hostpath-plugin.yaml new file mode 100644 index 0000000000000..44fefe22cef2c --- /dev/null +++ b/cluster/addons/volume-group-snapshots/csi-hostpath-plugin.yaml @@ -0,0 +1,397 @@ + # All of the individual sidecar RBAC roles get bound +# to this account. +kind: ServiceAccount +apiVersion: v1 +metadata: + name: csi-hostpathplugin-sa + namespace: default + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: serviceaccount +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: attacher-cluster-role + name: csi-hostpathplugin-attacher-cluster-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-attacher-runner +subjects: +- kind: ServiceAccount + name: csi-hostpathplugin-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: health-monitor-controller-cluster-role + name: csi-hostpathplugin-health-monitor-controller-cluster-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-health-monitor-controller-runner +subjects: +- kind: ServiceAccount + name: csi-hostpathplugin-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: provisioner-cluster-role + name: csi-hostpathplugin-provisioner-cluster-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-provisioner-runner +subjects: +- kind: ServiceAccount + name: csi-hostpathplugin-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: resizer-cluster-role + name: csi-hostpathplugin-resizer-cluster-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-resizer-runner +subjects: +- kind: ServiceAccount + name: csi-hostpathplugin-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: snapshotter-cluster-role + name: csi-hostpathplugin-snapshotter-cluster-role +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: external-snapshotter-runner +subjects: +- kind: ServiceAccount + name: csi-hostpathplugin-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: 
csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: attacher-role
+  name: csi-hostpathplugin-attacher-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: external-attacher-cfg
+subjects:
+- kind: ServiceAccount
+  name: csi-hostpathplugin-sa
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: hostpath.csi.k8s.io
+    app.kubernetes.io/part-of: csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: health-monitor-controller-role
+  name: csi-hostpathplugin-health-monitor-controller-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: external-health-monitor-controller-cfg
+subjects:
+- kind: ServiceAccount
+  name: csi-hostpathplugin-sa
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: hostpath.csi.k8s.io
+    app.kubernetes.io/part-of: csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: provisioner-role
+  name: csi-hostpathplugin-provisioner-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: external-provisioner-cfg
+subjects:
+- kind: ServiceAccount
+  name: csi-hostpathplugin-sa
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: hostpath.csi.k8s.io
+    app.kubernetes.io/part-of: csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: resizer-role
+  name: csi-hostpathplugin-resizer-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: external-resizer-cfg
+subjects:
+- kind: ServiceAccount
+  name: csi-hostpathplugin-sa
+  namespace: default
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: hostpath.csi.k8s.io
+    app.kubernetes.io/part-of: csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: snapshotter-role
+  name: csi-hostpathplugin-snapshotter-role
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: external-snapshotter-leaderelection
+subjects:
+- kind: ServiceAccount
+  name: csi-hostpathplugin-sa
+  namespace: default
+---
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-hostpathplugin
+  namespace: default
+  labels:
+    app.kubernetes.io/instance: hostpath.csi.k8s.io
+    app.kubernetes.io/part-of: csi-driver-host-path
+    app.kubernetes.io/name: csi-hostpathplugin
+    app.kubernetes.io/component: plugin
+spec:
+  serviceName: "csi-hostpathplugin"
+  # One replica only:
+  # Host path driver only works when everything runs
+  # on a single node.
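+  # (Running the driver as a StatefulSet with a single replica also guarantees
+  # that at most one plugin pod is running at a time, so the on-node state in
+  # the csi-data-dir volume below is never shared between pods.)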
+ replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: plugin + template: + metadata: + labels: + app.kubernetes.io/instance: hostpath.csi.k8s.io + app.kubernetes.io/part-of: csi-driver-host-path + app.kubernetes.io/name: csi-hostpathplugin + app.kubernetes.io/component: plugin + spec: + serviceAccountName: csi-hostpathplugin-sa + containers: + - name: hostpath + image: registry.k8s.io/sig-storage/hostpathplugin:v1.14.0 + args: + - "--drivername=hostpath.csi.k8s.io" + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /csi-data-dir + name: csi-data-dir + - mountPath: /dev + name: dev-dir + + - name: csi-external-health-monitor-controller + image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.12.1 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--leader-election" + env: + - name: ADDRESS + value: /csi/csi.sock + imagePullPolicy: "IfNotPresent" + volumeMounts: + - name: socket-dir + mountPath: /csi + + - name: node-driver-registrar + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. + privileged: true + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - mountPath: /csi-data-dir + name: csi-data-dir + + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + args: + - --csi-address=/csi/csi.sock + - --health-port=9898 + + - name: csi-attacher + image: registry.k8s.io/sig-storage/csi-attacher:v4.6.1 + args: + - --v=5 + - --csi-address=/csi/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + + - name: csi-provisioner + image: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1 + args: + - -v=5 + - --csi-address=/csi/csi.sock + - --feature-gates=Topology=true + # end csi-provisioner args + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. 
+            privileged: true
+        volumeMounts:
+          - mountPath: /csi
+            name: socket-dir
+
+      - name: csi-resizer
+        image: registry.k8s.io/sig-storage/csi-resizer:v1.11.1
+        args:
+          - -v=5
+          - -csi-address=/csi/csi.sock
+        securityContext:
+          # This is necessary only for systems with SELinux, where
+          # non-privileged sidecar containers cannot access unix domain socket
+          # created by privileged CSI driver container.
+          privileged: true
+        volumeMounts:
+          - mountPath: /csi
+            name: socket-dir
+
+      - name: csi-snapshotter
+        image: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1
+        args:
+          - -v=5
+          - --csi-address=/csi/csi.sock
+          - --enable-volume-group-snapshots=true
+        securityContext:
+          # This is necessary only for systems with SELinux, where
+          # non-privileged sidecar containers cannot access unix domain socket
+          # created by privileged CSI driver container.
+          privileged: true
+        volumeMounts:
+          - mountPath: /csi
+            name: socket-dir
+
+      volumes:
+        - hostPath:
+            path: /var/lib/kubelet/plugins/csi-hostpath
+            type: DirectoryOrCreate
+          name: socket-dir
+        - hostPath:
+            path: /var/lib/kubelet/pods
+            type: DirectoryOrCreate
+          name: mountpoint-dir
+        - hostPath:
+            path: /var/lib/kubelet/plugins_registry
+            type: Directory
+          name: registration-dir
+        - hostPath:
+            path: /var/lib/kubelet/plugins
+            type: Directory
+          name: plugins-dir
+        - hostPath:
+            # 'path' is where PV data is persisted on host.
+            # using /tmp is also possible, though the PVs will not be available
+            # after plugin container recreation or host reboot
+            path: /var/lib/csi-hostpath-data/
+            type: DirectoryOrCreate
+          name: csi-data-dir
+        - hostPath:
+            path: /dev
+            type: Directory
+          name: dev-dir
diff --git a/cluster/addons/volume-group-snapshots/run_group_snapshot_e2e.sh b/cluster/addons/volume-group-snapshots/run_group_snapshot_e2e.sh
new file mode 100755
index 0000000000000..39fbda0a32037
--- /dev/null
+++ b/cluster/addons/volume-group-snapshots/run_group_snapshot_e2e.sh
@@ -0,0 +1,325 @@
+#!/bin/sh
+# Copyright 2018 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# hack script for running a kind e2e
+# must be run with a kubernetes checkout in $PWD (i.e. from the checkout)
+# Usage: SKIP="ginkgo skip regex" FOCUS="ginkgo focus regex" run_group_snapshot_e2e.sh
+
+set -o errexit -o nounset -o xtrace
+
+# Settings:
+# SKIP: ginkgo skip regex
+# FOCUS: ginkgo focus regex
+# LABEL_FILTER: ginkgo label query for selecting tests (see "Spec Labels" in https://onsi.github.io/ginkgo/#filtering-specs)
+#
+# The default is to focus on conformance tests. Serial tests get skipped when
+# parallel testing is enabled. Using LABEL_FILTER instead of combining SKIP and
+# FOCUS is recommended (more expressive, easier to read than regexp).
+#
+# GA_ONLY: true  - limit to GA APIs/features as much as possible
+#          false - (default) APIs and features left at defaults
+# FEATURE_GATES:
+#          JSON or YAML encoding of a string/bool map: {"FeatureGateA": true, "FeatureGateB": false}
+#          Enables or disables feature gates in the entire cluster.
+#          Cannot be used when GA_ONLY=true.
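+#          Example invocation (the gate name is illustrative; any valid map works):
+#            FEATURE_GATES='{"AllBeta": true}' ./run_group_snapshot_e2e.sh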
+# RUNTIME_CONFIG:
+#          JSON or YAML encoding of a string/string (!) map: {"apia.example.com/v1alpha1": "true", "apib.example.com/v1beta1": "false"}
+#          Enables API groups in the apiserver via --runtime-config.
+#          Cannot be used when GA_ONLY=true.
+
+# cleanup logic for cleanup on exit
+CLEANED_UP=false
+cleanup() {
+  if [ "$CLEANED_UP" = "true" ]; then
+    return
+  fi
+  # KIND_CREATE_ATTEMPTED is true once we: kind create
+  if [ "${KIND_CREATE_ATTEMPTED:-}" = true ]; then
+    kind "export" logs "${ARTIFACTS}" || true
+    kind delete cluster || true
+  fi
+  rm -f _output/bin/e2e.test || true
+  # remove our tempdir, this needs to be last, or it will prevent kind delete
+  if [ -n "${TMP_DIR:-}" ]; then
+    rm -rf "${TMP_DIR:?}"
+  fi
+  CLEANED_UP=true
+}
+
+# setup signal handlers
+# shellcheck disable=SC2317 # this is not unreachable code
+signal_handler() {
+  if [ -n "${GINKGO_PID:-}" ]; then
+    kill -TERM "$GINKGO_PID" || true
+  fi
+  cleanup
+}
+trap signal_handler INT TERM
+
+# build kubernetes / node image, e2e binaries
+build() {
+  # build the node image w/ kubernetes
+  kind build node-image -v 1
+  # Ginkgo v1 is used by Kubernetes 1.24 and earlier, fallback if v2 is not available.
+  GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/v2/ginkgo"
+  if [ ! -d "$GINKGO_SRC_DIR" ]; then
+    GINKGO_SRC_DIR="vendor/github.com/onsi/ginkgo/ginkgo"
+  fi
+  # make sure we have e2e requirements
+  make all WHAT="cmd/kubectl test/e2e/e2e.test ${GINKGO_SRC_DIR}"
+
+  # Ensure the built kubectl is used instead of system
+  export PATH="${PWD}/_output/bin:$PATH"
+}
+
+check_structured_log_support() {
+  case "${KUBE_VERSION}" in
+    v1.1[0-8].*)
+      echo "$1 is only supported on versions >= v1.19, got ${KUBE_VERSION}"
+      exit 1
+      ;;
+  esac
+}
+
+# up a cluster with kind
+create_cluster() {
+  # Grab the version of the cluster we're about to start
+  KUBE_VERSION="$(docker run --rm --entrypoint=cat "kindest/node:latest" /kind/version)"
+
+  # Default Log level for all components in test clusters
+  KIND_CLUSTER_LOG_LEVEL=${KIND_CLUSTER_LOG_LEVEL:-4}
+
+  # potentially enable --logging-format
+  CLUSTER_LOG_FORMAT=${CLUSTER_LOG_FORMAT:-}
+  scheduler_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
+  controllerManager_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
+  apiServer_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
+  if [ -n "$CLUSTER_LOG_FORMAT" ]; then
+    check_structured_log_support "CLUSTER_LOG_FORMAT"
+    scheduler_extra_args="${scheduler_extra_args}
+      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
+    controllerManager_extra_args="${controllerManager_extra_args}
+      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
+    apiServer_extra_args="${apiServer_extra_args}
+      \"logging-format\": \"${CLUSTER_LOG_FORMAT}\""
+  fi
+  kubelet_extra_args="      \"v\": \"${KIND_CLUSTER_LOG_LEVEL}\""
+  KUBELET_LOG_FORMAT=${KUBELET_LOG_FORMAT:-$CLUSTER_LOG_FORMAT}
+  if [ -n "$KUBELET_LOG_FORMAT" ]; then
+    check_structured_log_support "KUBELET_LOG_FORMAT"
+    kubelet_extra_args="${kubelet_extra_args}
+      \"logging-format\": \"${KUBELET_LOG_FORMAT}\""
+  fi
+
+  # JSON or YAML map injected into featureGates config
+  feature_gates="${FEATURE_GATES:-{\}}"
+  # --runtime-config argument value passed to the API server, again as a map
+  runtime_config="${RUNTIME_CONFIG:-{\}}"
+
+  case "${GA_ONLY:-false}" in
+  false)
+    :
+    ;;
+  true)
+    if [ "${feature_gates}" != "{}" ]; then
+      echo "GA_ONLY=true and FEATURE_GATES=${feature_gates} are mutually exclusive."
+      exit 1
+    fi
+    if [ "${runtime_config}" != "{}" ]; then
+      echo "GA_ONLY=true and RUNTIME_CONFIG=${runtime_config} are mutually exclusive."
+      exit 1
+    fi
+
+    echo "Limiting to GA APIs and features for ${KUBE_VERSION}"
+    feature_gates='{"AllAlpha":false,"AllBeta":false}'
+    runtime_config='{"api/alpha":"false", "api/beta":"false"}'
+    ;;
+  *)
+    echo "\$GA_ONLY set to '${GA_ONLY}'; supported values are true and false (default)"
+    exit 1
+    ;;
+  esac
+
+  # create the config file
+  cat <<EOF > "${ARTIFACTS}/kind-config.yaml"
+# config for 1 control plane node and 2 workers (necessary for conformance)
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+networking:
+  ipFamily: ${IP_FAMILY:-ipv4}
+  kubeProxyMode: ${KUBE_PROXY_MODE:-iptables}
+  # don't pass through host search paths
+  # TODO: possibly a reasonable default in the future for kind ...
+  dnsSearch: []
+nodes:
+- role: control-plane
+- role: worker
+- role: worker
+featureGates: ${feature_gates}
+runtimeConfig: ${runtime_config}
+kubeadmConfigPatches:
+- |
+  kind: ClusterConfiguration
+  metadata:
+    name: config
+  apiServer:
+    extraArgs:
+${apiServer_extra_args}
+  controllerManager:
+    extraArgs:
+${controllerManager_extra_args}
+  scheduler:
+    extraArgs:
+${scheduler_extra_args}
+  ---
+  kind: InitConfiguration
+  nodeRegistration:
+    kubeletExtraArgs:
+${kubelet_extra_args}
+  ---
+  kind: JoinConfiguration
+  nodeRegistration:
+    kubeletExtraArgs:
+${kubelet_extra_args}
+EOF
+  # NOTE: must match the number of workers above
+  NUM_NODES=2
+  # actually create the cluster
+  # TODO(BenTheElder): settle on verbosity for this script
+  KIND_CREATE_ATTEMPTED=true
+  kind create cluster \
+    --image=kindest/node:latest \
+    --retain \
+    --wait=1m \
+    -v=3 \
+    "--config=${ARTIFACTS}/kind-config.yaml"
+
+  # debug cluster version
+  kubectl version
+
+  # Patch kube-proxy to set the verbosity level
+  kubectl patch -n kube-system daemonset/kube-proxy \
+    --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--v='"${KIND_CLUSTER_LOG_LEVEL}"'" }]'
+}
+
+# run e2es with ginkgo-e2e.sh
+run_tests() {
+  # IPv6 clusters need some CoreDNS changes in order to work in k8s CI:
+  # 1. k8s CI doesn't offer IPv6 connectivity, so CoreDNS should be configured
+  # to work in an offline environment:
+  # https://github.com/coredns/coredns/issues/2494#issuecomment-457215452
+  # 2. k8s CI adds the following domains to resolv.conf search field:
+  # c.k8s-prow-builds.internal google.internal.
+  # CoreDNS should handle those domains and answer with NXDOMAIN instead of SERVFAIL,
+  # otherwise pods stop trying to resolve the domain.
+  if [ "${IP_FAMILY:-ipv4}" = "ipv6" ]; then
+    # Get the current config
+    original_coredns=$(kubectl get -oyaml -n=kube-system configmap/coredns)
+    echo "Original CoreDNS config:"
+    echo "${original_coredns}"
+    # Patch it
+    fixed_coredns=$(
+      printf '%s' "${original_coredns}" | sed \
+        -e 's/^.*kubernetes cluster\.local/& internal/' \
+        -e '/^.*upstream$/d' \
+        -e '/^.*fallthrough.*$/d' \
+        -e '/^.*forward . 
\/etc\/resolv.conf$/d' \
+        -e '/^.*loop$/d' \
+    )
+    echo "Patched CoreDNS config:"
+    echo "${fixed_coredns}"
+    printf '%s' "${fixed_coredns}" | kubectl apply -f -
+  fi
+
+  # ginkgo regexes and label filter
+  SKIP="${SKIP:-}"
+  FOCUS="${FOCUS:-}"
+  LABEL_FILTER="${LABEL_FILTER:-}"
+  if [ -z "${FOCUS}" ] && [ -z "${LABEL_FILTER}" ]; then
+    FOCUS="\\[Conformance\\]"
+  fi
+  # if PARALLEL=true, skip serial tests and set --ginkgo-parallel
+  if [ "${PARALLEL:-false}" = "true" ]; then
+    export GINKGO_PARALLEL=y
+    if [ -z "${SKIP}" ]; then
+      SKIP="\\[Serial\\]"
+    else
+      SKIP="\\[Serial\\]|${SKIP}"
+    fi
+  fi
+
+  # setting this env prevents ginkgo e2e from trying to run provider setup
+  export KUBERNETES_CONFORMANCE_TEST='y'
+  # setting these is required to make RuntimeClass tests work ... :/
+  export KUBE_CONTAINER_RUNTIME=remote
+  export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock
+  export KUBE_CONTAINER_RUNTIME_NAME=containerd
+  # ginkgo can take forever to exit, so we run it in the background and save the
+  # PID. Bash will not run traps while waiting on a process, but it will while
+  # running a builtin like `wait`; saving the PID also allows us to forward the
+  # interrupt.
+  kubectl apply -f cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml || exit 1
+  kubectl apply -f cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml || exit 1
+  kubectl apply -f cluster/addons/volumesnapshots/crd/snapshot.storage.k8s.io_volumesnapshots.yaml || exit 1
+  kubectl apply -f cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml || exit 1
+  kubectl apply -f cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml || exit 1
+  kubectl apply -f cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml || exit 1
+
+
+  kubectl apply -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/v8.0.0/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml || exit 1
+  kubectl apply -f ./cluster/addons/volumesnapshots/volume-snapshot-controller/setup-snapshot-controller.yaml || exit 1
+
+
+  ./hack/ginkgo-e2e.sh \
+    '--provider=skeleton' "--num-nodes=${NUM_NODES}" \
+    "--ginkgo.focus=${FOCUS}" "--ginkgo.skip=${SKIP}" "--ginkgo.label-filter=${LABEL_FILTER}" \
+    "--report-dir=${ARTIFACTS}" '--disable-log-dump=true' &
+  GINKGO_PID=$!
+  wait "$GINKGO_PID"
+}
+
+main() {
+  # create temp dir and setup cleanup
+  TMP_DIR=$(mktemp -d)
+
+  # ensure artifacts (results) directory exists when not in CI
+  export ARTIFACTS="${ARTIFACTS:-${PWD}/_artifacts}"
+  mkdir -p "${ARTIFACTS}"
+
+  # export the KUBECONFIG to a unique path for testing
+  KUBECONFIG="${HOME}/.kube/kind-test-config"
+  export KUBECONFIG
+  echo "exported KUBECONFIG=${KUBECONFIG}"
+
+  # debug kind version
+  kind version
+
+  # build kubernetes
+  build
+  # in CI attempt to release some memory after building
+  if [ -n "${KUBETEST_IN_DOCKER:-}" ]; then
+    sync || true
+    echo 1 > /proc/sys/vm/drop_caches || true
+  fi
+
+  # create the cluster and run tests
+  res=0
+  create_cluster || res=$?
+  run_tests || res=$?
+  cleanup || res=$?
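+  # res ends up holding the exit code of the last step above that failed (0 if all passed)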
+  exit $res
+}
+
+main
diff --git a/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml
new file mode 100644
index 0000000000000..aff3a5719dcb7
--- /dev/null
+++ b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml
@@ -0,0 +1,94 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/814"
+    controller-gen.kubebuilder.io/version: v0.15.0
+  name: volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io
+spec:
+  group: groupsnapshot.storage.k8s.io
+  names:
+    kind: VolumeGroupSnapshotClass
+    listKind: VolumeGroupSnapshotClassList
+    plural: volumegroupsnapshotclasses
+    shortNames:
+    - vgsclass
+    - vgsclasses
+    singular: volumegroupsnapshotclass
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - jsonPath: .driver
+      name: Driver
+      type: string
+    - description: Determines whether a VolumeGroupSnapshotContent created through
+        the VolumeGroupSnapshotClass should be deleted when its bound VolumeGroupSnapshot
+        is deleted.
+      jsonPath: .deletionPolicy
+      name: DeletionPolicy
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: |-
+          VolumeGroupSnapshotClass specifies parameters that an underlying storage system
+          uses when creating a volume group snapshot. A specific VolumeGroupSnapshotClass
+          is used by specifying its name in a VolumeGroupSnapshot object.
+          VolumeGroupSnapshotClasses are non-namespaced.
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          deletionPolicy:
+            description: |-
+              DeletionPolicy determines whether a VolumeGroupSnapshotContent created
+              through the VolumeGroupSnapshotClass should be deleted when its bound
+              VolumeGroupSnapshot is deleted.
+              Supported values are "Retain" and "Delete".
+              "Retain" means that the VolumeGroupSnapshotContent and its physical group
+              snapshot on underlying storage system are kept.
+              "Delete" means that the VolumeGroupSnapshotContent and its physical group
+              snapshot on underlying storage system are deleted.
+              Required.
+            enum:
+            - Delete
+            - Retain
+            type: string
+          driver:
+            description: |-
+              Driver is the name of the storage driver expected to handle this VolumeGroupSnapshotClass.
+              Required.
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          parameters:
+            additionalProperties:
+              type: string
+            description: |-
+              Parameters is a key-value map with storage driver specific parameters for
+              creating group snapshots.
+              These values are opaque to Kubernetes and are passed directly to the driver.
+ type: object + required: + - deletionPolicy + - driver + type: object + served: true + storage: true + subresources: {} diff --git a/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml new file mode 100644 index 0000000000000..28584e56bfcfa --- /dev/null +++ b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml @@ -0,0 +1,335 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + controller-gen.kubebuilder.io/version: v0.15.0 + name: volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io +spec: + group: groupsnapshot.storage.k8s.io + names: + kind: VolumeGroupSnapshotContent + listKind: VolumeGroupSnapshotContentList + plural: volumegroupsnapshotcontents + shortNames: + - vgsc + - vgscs + singular: volumegroupsnapshotcontent + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Indicates if all the individual snapshots in the group are ready + to be used to restore a group of volumes. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Determines whether this VolumeGroupSnapshotContent and its physical + group snapshot on the underlying storage system should be deleted when its + bound VolumeGroupSnapshot is deleted. + jsonPath: .spec.deletionPolicy + name: DeletionPolicy + type: string + - description: Name of the CSI driver used to create the physical group snapshot + on the underlying storage system. + jsonPath: .spec.driver + name: Driver + type: string + - description: Name of the VolumeGroupSnapshotClass from which this group snapshot + was (or will be) created. + jsonPath: .spec.volumeGroupSnapshotClassName + name: VolumeGroupSnapshotClass + type: string + - description: Namespace of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent + object is bound. + jsonPath: .spec.volumeGroupSnapshotRef.namespace + name: VolumeGroupSnapshotNamespace + type: string + - description: Name of the VolumeGroupSnapshot object to which this VolumeGroupSnapshotContent + object is bound. + jsonPath: .spec.volumeGroupSnapshotRef.name + name: VolumeGroupSnapshot + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + VolumeGroupSnapshotContent represents the actual "on-disk" group snapshot object + in the underlying storage system + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec defines properties of a VolumeGroupSnapshotContent created by the underlying storage system. + Required. 
+ properties: + deletionPolicy: + description: |- + DeletionPolicy determines whether this VolumeGroupSnapshotContent and the + physical group snapshot on the underlying storage system should be deleted + when the bound VolumeGroupSnapshot is deleted. + Supported values are "Retain" and "Delete". + "Retain" means that the VolumeGroupSnapshotContent and its physical group + snapshot on underlying storage system are kept. + "Delete" means that the VolumeGroupSnapshotContent and its physical group + snapshot on underlying storage system are deleted. + For dynamically provisioned group snapshots, this field will automatically + be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field + defined in the corresponding VolumeGroupSnapshotClass. + For pre-existing snapshots, users MUST specify this field when creating the + VolumeGroupSnapshotContent object. + Required. + enum: + - Delete + - Retain + type: string + driver: + description: |- + Driver is the name of the CSI driver used to create the physical group snapshot on + the underlying storage system. + This MUST be the same as the name returned by the CSI GetPluginName() call for + that driver. + Required. + type: string + source: + description: |- + Source specifies whether the snapshot is (or should be) dynamically provisioned + or already exists, and just requires a Kubernetes object representation. + This field is immutable after creation. + Required. + properties: + groupSnapshotHandles: + description: |- + GroupSnapshotHandles specifies the CSI "group_snapshot_id" of a pre-existing + group snapshot and a list of CSI "snapshot_id" of pre-existing snapshots + on the underlying storage system for which a Kubernetes object + representation was (or should be) created. + This field is immutable. + properties: + volumeGroupSnapshotHandle: + description: |- + VolumeGroupSnapshotHandle specifies the CSI "group_snapshot_id" of a pre-existing + group snapshot on the underlying storage system for which a Kubernetes object + representation was (or should be) created. + This field is immutable. + Required. + type: string + volumeSnapshotHandles: + description: |- + VolumeSnapshotHandles is a list of CSI "snapshot_id" of pre-existing + snapshots on the underlying storage system for which Kubernetes objects + representation were (or should be) created. + This field is immutable. + Required. + items: + type: string + type: array + required: + - volumeGroupSnapshotHandle + - volumeSnapshotHandles + type: object + x-kubernetes-validations: + - message: groupSnapshotHandles is immutable + rule: self == oldSelf + volumeHandles: + description: |- + VolumeHandles is a list of volume handles on the backend to be snapshotted + together. It is specified for dynamic provisioning of the VolumeGroupSnapshot. + This field is immutable. 
+ items: + type: string + type: array + x-kubernetes-validations: + - message: volumeHandles is immutable + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: volumeHandles is required once set + rule: '!has(oldSelf.volumeHandles) || has(self.volumeHandles)' + - message: groupSnapshotHandles is required once set + rule: '!has(oldSelf.groupSnapshotHandles) || has(self.groupSnapshotHandles)' + - message: exactly one of volumeHandles and groupSnapshotHandles must + be set + rule: (has(self.volumeHandles) && !has(self.groupSnapshotHandles)) + || (!has(self.volumeHandles) && has(self.groupSnapshotHandles)) + volumeGroupSnapshotClassName: + description: |- + VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass from + which this group snapshot was (or will be) created. + Note that after provisioning, the VolumeGroupSnapshotClass may be deleted or + recreated with different set of values, and as such, should not be referenced + post-snapshot creation. + For dynamic provisioning, this field must be set. + This field may be unset for pre-provisioned snapshots. + type: string + volumeGroupSnapshotRef: + description: |- + VolumeGroupSnapshotRef specifies the VolumeGroupSnapshot object to which this + VolumeGroupSnapshotContent object is bound. + VolumeGroupSnapshot.Spec.VolumeGroupSnapshotContentName field must reference to + this VolumeGroupSnapshotContent's name for the bidirectional binding to be valid. + For a pre-existing VolumeGroupSnapshotContent object, name and namespace of the + VolumeGroupSnapshot object MUST be provided for binding to happen. + This field is immutable after creation. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + TODO: this design is not final and this field is subject to change in the future. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+                    type: string
+                type: object
+                x-kubernetes-map-type: atomic
+                x-kubernetes-validations:
+                - message: both volumeGroupSnapshotRef.name and volumeGroupSnapshotRef.namespace
+                    must be set
+                  rule: has(self.name) && has(self.__namespace__)
+                - message: volumeGroupSnapshotRef is immutable
+                  rule: self == oldSelf
+            required:
+            - deletionPolicy
+            - driver
+            - source
+            - volumeGroupSnapshotRef
+            type: object
+          status:
+            description: status represents the current information of a group snapshot.
+            properties:
+              creationTime:
+                description: |-
+                  CreationTime is the timestamp when the point-in-time group snapshot is taken
+                  by the underlying storage system.
+                  If not specified, it indicates the creation time is unknown.
+                  The format of this field is a Unix nanoseconds time encoded as an int64.
+                  On Unix, the command date +%s%N returns the current time in nanoseconds
+                  since 1970-01-01 00:00:00 UTC.
+                format: int64
+                type: integer
+              error:
+                description: |-
+                  Error is the last observed error during group snapshot creation, if any.
+                  Upon success after retry, this error field will be cleared.
+                properties:
+                  message:
+                    description: |-
+                      message is a string detailing the encountered error during snapshot
+                      creation if specified.
+                      NOTE: message may be logged, and it should not contain sensitive
+                      information.
+                    type: string
+                  time:
+                    description: time is the timestamp when the error was encountered.
+                    format: date-time
+                    type: string
+                type: object
+              pvVolumeSnapshotContentList:
+                description: |-
+                  PVVolumeSnapshotContentList is the list of pairs of PV and
+                  VolumeSnapshotContent for this group snapshot.
+                  The maximum number of allowed snapshots in the group is 100.
+                items:
+                  description: |-
+                    PVVolumeSnapshotContentPair represents a pair of PV names and
+                    VolumeSnapshotContent names
+                  properties:
+                    persistentVolumeRef:
+                      description: PersistentVolumeRef is a reference to the persistent
+                        volume resource
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    volumeSnapshotContentRef:
+                      description: VolumeSnapshotContentRef is a reference to the
+                        volume snapshot content resource
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                  type: object
+                type: array
+              readyToUse:
+                description: |-
+                  ReadyToUse indicates if all the individual snapshots in the group are ready to be
+                  used to restore a group of volumes.
+                  ReadyToUse becomes true when ReadyToUse of all individual snapshots becomes true.
+                type: boolean
+              volumeGroupSnapshotHandle:
+                description: |-
+                  VolumeGroupSnapshotHandle is a unique id returned by the CSI driver
+                  to identify the VolumeGroupSnapshot on the storage system.
+                  If a storage system does not provide such an id, the
+                  CSI driver can choose to return the VolumeGroupSnapshot name.
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml new file mode 100644 index 0000000000000..3d9a771dea9d6 --- /dev/null +++ b/cluster/addons/volumesnapshots/crd/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml @@ -0,0 +1,273 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/1068" + controller-gen.kubebuilder.io/version: v0.15.0 + name: volumegroupsnapshots.groupsnapshot.storage.k8s.io +spec: + group: groupsnapshot.storage.k8s.io + names: + kind: VolumeGroupSnapshot + listKind: VolumeGroupSnapshotList + plural: volumegroupsnapshots + shortNames: + - vgs + singular: volumegroupsnapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicates if all the individual snapshots in the group are ready + to be used to restore a group of volumes. + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: The name of the VolumeGroupSnapshotClass requested by the VolumeGroupSnapshot. + jsonPath: .spec.volumeGroupSnapshotClassName + name: VolumeGroupSnapshotClass + type: string + - description: Name of the VolumeGroupSnapshotContent object to which the VolumeGroupSnapshot + object intends to bind to. Please note that verification of binding actually + requires checking both VolumeGroupSnapshot and VolumeGroupSnapshotContent + to ensure both are pointing at each other. Binding MUST be verified prior + to usage of this object. + jsonPath: .status.boundVolumeGroupSnapshotContentName + name: VolumeGroupSnapshotContent + type: string + - description: Timestamp when the point-in-time group snapshot was taken by the + underlying storage system. + jsonPath: .status.creationTime + name: CreationTime + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + VolumeGroupSnapshot is a user's request for creating either a point-in-time + group snapshot or binding to a pre-existing group snapshot. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec defines the desired characteristics of a group snapshot requested by a user. + Required. + properties: + source: + description: |- + Source specifies where a group snapshot will be created from. + This field is immutable after creation. + Required. + properties: + selector: + description: |- + Selector is a label query over persistent volume claims that are to be + grouped together for snapshotting. 
+ This labelSelector will be used to match the label added to a PVC. + If the label is added or removed to a volume after a group snapshot + is created, the existing group snapshots won't be modified. + Once a VolumeGroupSnapshotContent is created and the sidecar starts to process + it, the volume list will not change with retries. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - message: selector is immutable + rule: self == oldSelf + volumeGroupSnapshotContentName: + description: |- + VolumeGroupSnapshotContentName specifies the name of a pre-existing VolumeGroupSnapshotContent + object representing an existing volume group snapshot. + This field should be set if the volume group snapshot already exists and + only needs a representation in Kubernetes. + This field is immutable. + type: string + x-kubernetes-validations: + - message: volumeGroupSnapshotContentName is immutable + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: selector is required once set + rule: '!has(oldSelf.selector) || has(self.selector)' + - message: volumeGroupSnapshotContentName is required once set + rule: '!has(oldSelf.volumeGroupSnapshotContentName) || has(self.volumeGroupSnapshotContentName)' + - message: exactly one of selector and volumeGroupSnapshotContentName + must be set + rule: (has(self.selector) && !has(self.volumeGroupSnapshotContentName)) + || (!has(self.selector) && has(self.volumeGroupSnapshotContentName)) + volumeGroupSnapshotClassName: + description: |- + VolumeGroupSnapshotClassName is the name of the VolumeGroupSnapshotClass + requested by the VolumeGroupSnapshot. + VolumeGroupSnapshotClassName may be left nil to indicate that the default + class will be used. + Empty string is not allowed for this field. + type: string + x-kubernetes-validations: + - message: volumeGroupSnapshotClassName must not be the empty string + when set + rule: size(self) > 0 + required: + - source + type: object + status: + description: |- + Status represents the current information of a group snapshot. 
+              Consumers must verify binding between VolumeGroupSnapshot and
+              VolumeGroupSnapshotContent objects is successful (by validating that both
+              VolumeGroupSnapshot and VolumeGroupSnapshotContent point to each other) before
+              using this object.
+            properties:
+              boundVolumeGroupSnapshotContentName:
+                description: |-
+                  BoundVolumeGroupSnapshotContentName is the name of the VolumeGroupSnapshotContent
+                  object to which this VolumeGroupSnapshot object intends to bind to.
+                  If not specified, it indicates that the VolumeGroupSnapshot object has not
+                  been successfully bound to a VolumeGroupSnapshotContent object yet.
+                  NOTE: To avoid possible security issues, consumers must verify binding between
+                  VolumeGroupSnapshot and VolumeGroupSnapshotContent objects is successful
+                  (by validating that both VolumeGroupSnapshot and VolumeGroupSnapshotContent
+                  point at each other) before using this object.
+                type: string
+              creationTime:
+                description: |-
+                  CreationTime is the timestamp when the point-in-time group snapshot is taken
+                  by the underlying storage system.
+                  If not specified, it may indicate that the creation time of the group snapshot
+                  is unknown.
+                format: date-time
+                type: string
+              error:
+                description: |-
+                  Error is the last observed error during group snapshot creation, if any.
+                  This field could be helpful to upper level controllers (i.e., application
+                  controller) to decide whether they should continue on waiting for the group
+                  snapshot to be created based on the type of error reported.
+                  The snapshot controller will keep retrying when an error occurs during the
+                  group snapshot creation. Upon success, this error field will be cleared.
+                properties:
+                  message:
+                    description: |-
+                      message is a string detailing the encountered error during snapshot
+                      creation if specified.
+                      NOTE: message may be logged, and it should not contain sensitive
+                      information.
+                    type: string
+                  time:
+                    description: time is the timestamp when the error was encountered.
+                    format: date-time
+                    type: string
+                type: object
+              pvcVolumeSnapshotRefList:
+                description: |-
+                  VolumeSnapshotRefList is the list of PVC and VolumeSnapshot pairs that
+                  are part of this group snapshot.
+                  The maximum number of allowed snapshots in the group is 100.
+                items:
+                  description: PVCVolumeSnapshotPair defines a pair of a PVC reference
+                    and a Volume Snapshot Reference
+                  properties:
+                    persistentVolumeClaimRef:
+                      description: PersistentVolumeClaimRef is a reference to the
+                        PVC this pair is referring to
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    volumeSnapshotRef:
+                      description: VolumeSnapshotRef is a reference to the VolumeSnapshot
+                        this pair is referring to
+                      properties:
+                        name:
+                          description: |-
+                            Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            TODO: Add other useful fields. apiVersion, kind, uid?
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                  type: object
+                type: array
+              readyToUse:
+                description: |-
+                  ReadyToUse indicates if all the individual snapshots in the group are ready
+                  to be used to restore a group of volumes.
+                  ReadyToUse becomes true when ReadyToUse of all individual snapshots becomes true.
+                  If not specified, it means the readiness of a group snapshot is unknown.
+                type: boolean
+            type: object
+        required:
+        - spec
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/cluster/addons/volumesnapshots/volume-snapshot-controller/setup-snapshot-controller.yaml b/cluster/addons/volumesnapshots/volume-snapshot-controller/setup-snapshot-controller.yaml
new file mode 100644
index 0000000000000..b4eb3e36cdf27
--- /dev/null
+++ b/cluster/addons/volumesnapshots/volume-snapshot-controller/setup-snapshot-controller.yaml
@@ -0,0 +1,45 @@
+# This YAML file shows how to deploy the snapshot controller
+
+# The snapshot controller implements the control loop for CSI snapshot functionality.
+# It should be installed as part of the base Kubernetes distribution in an appropriate
+# namespace for components implementing base system functionality. For installing with
+# Vanilla Kubernetes, kube-system makes sense for the namespace.
+
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: snapshot-controller
+  namespace: kube-system
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: snapshot-controller
+  # The snapshot controller won't be marked as ready if the v1 CRDs are unavailable.
+  # The flag --retry-crd-interval-max is used to determine how long the controller
+  # will wait for the CRDs to become available before exiting. The default is 30 seconds,
+  # so minReadySeconds should be set slightly higher than the flag value.
+  minReadySeconds: 35
+  strategy:
+    rollingUpdate:
+      maxSurge: 0
+      maxUnavailable: 1
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: snapshot-controller
+    spec:
+      serviceAccountName: snapshot-controller
+      containers:
+        - name: snapshot-controller
+          image: registry.k8s.io/sig-storage/snapshot-controller:v8.0.1
+          args:
+            - "--v=5"
+            - "--leader-election=true"
+            - "--enable-volume-group-snapshots=true"
+            # Add a marker to the snapshot-controller manifests. This is needed to enable feature gates in CSI prow jobs.
+            # For example, in https://github.com/kubernetes-csi/csi-release-tools/pull/209, the snapshot-controller YAML is updated to add --prevent-volume-mode-conversion=true so that the feature can be enabled for certain e2e tests.
+            # end snapshot controller args
+          imagePullPolicy: IfNotPresent
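+# A quick way to check the rollout once this manifest is applied (names and
+# labels as defined above):
+#   kubectl -n kube-system rollout status deployment/snapshot-controller
+#   kubectl -n kube-system get pods -l app.kubernetes.io/name=snapshot-controller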