diff --git a/.github/ISSUE_TEMPLATE/3_bug_report.yml b/.github/ISSUE_TEMPLATE/3_bug_report.yml index f88af8696a1..ec7a7f40a70 100644 --- a/.github/ISSUE_TEMPLATE/3_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/3_bug_report.yml @@ -57,6 +57,7 @@ body: label: KEDA Version description: What version of KEDA that are you running? options: + - "2.14.0" - "2.13.1" - "2.13.0" - "2.12.1" @@ -66,25 +67,7 @@ body: - "2.11.0" - "2.10.1" - "2.10.0" - - "2.9.3" - - "2.9.2" - - "2.9.1" - - "2.9.0" - - "2.8.2" - - "2.8.1" - - "2.8.0" - - "2.7.1" - - "2.7.0" - - "2.6.1" - - "2.6.0" - - "2.5.0" - - "2.4.0" - - "2.3.0" - - "2.2.0" - - "2.1.0" - - "2.0.0" - - "1.5.0" - - "< 1.5.0" + - "< 2.10.0" - "Other" validations: required: false @@ -93,10 +76,10 @@ body: label: Kubernetes Version description: What version of Kubernetes that are you running? options: - - "1.30" - "1.29" - "1.28" - - "< 1.28" + - "1.27" + - "< 1.27" - "Other" validations: required: false diff --git a/.github/workflows/template-arm64-smoke-tests.yml b/.github/workflows/template-arm64-smoke-tests.yml index 11a2cc9144e..060ae624cbc 100644 --- a/.github/workflows/template-arm64-smoke-tests.yml +++ b/.github/workflows/template-arm64-smoke-tests.yml @@ -10,5 +10,5 @@ jobs: uses: kedacore/keda/.github/workflows/template-smoke-tests.yml@main with: runs-on: ARM64 - kubernetesVersion: v1.28 - kindImage: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 + kubernetesVersion: v1.29 + kindImage: kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245 diff --git a/.github/workflows/template-versions-smoke-tests.yml b/.github/workflows/template-versions-smoke-tests.yml index 72e50616ece..eb8895d16e4 100644 --- a/.github/workflows/template-versions-smoke-tests.yml +++ b/.github/workflows/template-versions-smoke-tests.yml @@ -9,17 +9,14 @@ jobs: strategy: fail-fast: false matrix: - kubernetesVersion: [v1.28, v1.27, v1.26, v1.25] + kubernetesVersion: [v1.29, v1.28, v1.27] include: + - kubernetesVersion: v1.29 + kindImage: kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245 - kubernetesVersion: v1.28 - kindImage: kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31 + kindImage: kindest/node:v1.28.7@sha256:9bc6c451a289cf96ad0bbaf33d416901de6fd632415b076ab05f5fa7e4f65c58 - kubernetesVersion: v1.27 - kindImage: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 - - kubernetesVersion: v1.26 - kindImage: kindest/node:v1.26.6@sha256:6e2d8b28a5b601defe327b98bd1c2d1930b49e5d8c512e1895099e4504007adb - - kubernetesVersion: v1.25 - kindImage: kindest/node:v1.25.11@sha256:227fa11ce74ea76a0474eeefb84cb75d8dad1b08638371ecf0e86259b35be0c8 - + kindImage: kindest/node:v1.27.11@sha256:681253009e68069b8e01aad36a1e0fa8cf18bb0ab3e5c4069b2e65cafdd70843 uses: kedacore/keda/.github/workflows/template-smoke-tests.yml@main with: runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index c68c9147631..b5f384238b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ## History - [Unreleased](#unreleased) +- [v2.14.0](#v2140) - [v2.13.1](#v2131) - [v2.13.0](#v2130) - [v2.12.1](#v2121) @@ -53,12 +54,46 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +#### Experimental + +Here is an 
overview of all new **experimental** features: + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +### Improvements + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +### Fixes + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +### Deprecations + +You can find all deprecations in [this overview](https://github.com/kedacore/keda/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3Abreaking-change) and [join the discussion here](https://github.com/kedacore/keda/discussions/categories/deprecations). + +New deprecation(s): + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +### Breaking Changes + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +### Other + +- TODO ([#XXX](https://github.com/kedacore/keda/issues/XXX)) + +## v2.14.0 + +### New + - **General**: Provide capability to filter CloudEvents ([#3533](https://github.com/kedacore/keda/issues/3533)) - **NATS Scaler**: Add TLS authentication ([#2296](https://github.com/kedacore/keda/issues/2296)) - **ScaledObject**: Ability to specify `initialCooldownPeriod` ([#5008](https://github.com/kedacore/keda/issues/5008)) - - #### Experimental Here is an overview of all new **experimental** features: diff --git a/config/crd/bases/keda.sh_scaledjobs.yaml b/config/crd/bases/keda.sh_scaledjobs.yaml index 0cdc2c3bb3c..2ff92b03b46 100644 --- a/config/crd/bases/keda.sh_scaledjobs.yaml +++ b/config/crd/bases/keda.sh_scaledjobs.yaml @@ -97,8 +97,8 @@ spec: batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. - This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - feature gate is enabled (enabled by default). + This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + feature gate is enabled (disabled by default). format: int32 type: integer completionMode: @@ -162,8 +162,8 @@ spec: It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. - This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - feature gate is enabled (enabled by default). + This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + feature gate is enabled (disabled by default). format: int32 type: integer parallelism: @@ -211,8 +211,8 @@ spec: running pods are terminated. - FailIndex: indicates that the pod's index is marked as Failed and will not be restarted. - This value is beta-level. It can be used when the - `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + This value is alpha-level. It can be used when the + `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). - Ignore: indicates that the counter towards the .backoffLimit is not incremented and a replacement pod is created. - Count: indicates that the pod is handled in the default way - the @@ -310,8 +310,7 @@ spec: When using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - This is on by default. + This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. type: string selector: description: |- @@ -612,9 +611,8 @@ spec: associated with the corresponding weight. 
properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set + of resources, in this case pods. properties: matchExpressions: description: matchExpressions is @@ -659,36 +657,6 @@ spec: type: object type: object x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic namespaceSelector: description: |- A label query over the set of namespaces that the term applies to. @@ -790,9 +758,8 @@ spec: a pod of the set of pods is running properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of + resources, in this case pods. properties: matchExpressions: description: matchExpressions is a list @@ -837,36 +804,6 @@ spec: type: object type: object x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. 
The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic namespaceSelector: description: |- A label query over the set of namespaces that the term applies to. @@ -966,9 +903,8 @@ spec: associated with the corresponding weight. properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set + of resources, in this case pods. properties: matchExpressions: description: matchExpressions is @@ -1013,36 +949,6 @@ spec: type: object type: object x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic namespaceSelector: description: |- A label query over the set of namespaces that the term applies to. @@ -1144,9 +1050,8 @@ spec: a pod of the set of pods is running properties: labelSelector: - description: |- - A label query over a set of resources, in this case pods. - If it's null, this PodAffinityTerm matches with no Pods. + description: A label query over a set of + resources, in this case pods. 
properties: matchExpressions: description: matchExpressions is a list @@ -1191,36 +1096,6 @@ spec: type: object type: object x-kubernetes-map-type: atomic - matchLabelKeys: - description: |- - MatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic - mismatchLabelKeys: - description: |- - MismatchLabelKeys is a set of pod label keys to select which pods will - be taken into consideration. The keys are used to lookup values from the - incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - to select the group of existing pods which pods will be taken into consideration - for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - pod labels will be ignored. The default value is empty. - The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - items: - type: string - type: array - x-kubernetes-list-type: atomic namespaceSelector: description: |- A label query over the set of namespaces that the term applies to. @@ -1599,19 +1474,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept @@ -1715,19 +1577,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept @@ -3031,19 +2880,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept @@ -3147,19 +2983,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept @@ -4461,19 +4284,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept @@ -4577,19 +4387,6 @@ spec: required: - port type: object - sleep: - description: Sleep represents the duration - that the container should sleep before - being terminated. - properties: - seconds: - description: Seconds is the number of - seconds to sleep. - format: int64 - type: integer - required: - - seconds - type: object tcpSocket: description: |- Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept @@ -6787,6 +6584,34 @@ spec: status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -6864,21 +6689,6 @@ spec: storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 type: string - volumeAttributesClassName: - description: |- - volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - If specified, the CSI driver will create or update the volume with the attributes defined - in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. - If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - exists. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. - type: string volumeMode: description: |- volumeMode defines what type of volume is required by the claim. @@ -7289,102 +7099,6 @@ spec: description: Projection that may be projected along with other supported volume types properties: - clusterTrustBundle: - description: |- - ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - of ClusterTrustBundle objects in an auto-updating file. 
- - - Alpha, gated by the ClusterTrustBundleProjection feature gate. - - - ClusterTrustBundle objects can either be selected by name, or by the - combination of signer name and a label selector. - - - Kubelet performs aggressive normalization of the PEM contents written - into the pod filesystem. Esoteric PEM features such as inter-block - comments and block headers are stripped. Certificates are deduplicated. - The ordering of certificates within the file is arbitrary, and Kubelet - may change the order over time. - properties: - labelSelector: - description: |- - Select all ClusterTrustBundles that match this label selector. Only has - effect if signerName is set. Mutually-exclusive with name. If unset, - interpreted as "match nothing". If set but empty, interpreted as "match - everything". - properties: - matchExpressions: - description: matchExpressions - is a list of label selector - requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the - label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - name: - description: |- - Select a single ClusterTrustBundle by object name. Mutually-exclusive - with signerName and labelSelector. - type: string - optional: - description: |- - If true, don't block pod startup if the referenced ClusterTrustBundle(s) - aren't available. If using name, then the named ClusterTrustBundle is - allowed not to exist. If using signerName, then the combination of - signerName and labelSelector is allowed to match zero - ClusterTrustBundles. - type: boolean - path: - description: Relative path from the - volume root to write the bundle. - type: string - signerName: - description: |- - Select all ClusterTrustBundles that match this signer name. - Mutually-exclusive with name. The contents of all selected - ClusterTrustBundles will be unified and deduplicated. 
- type: string - required: - - path - type: object configMap: description: configMap information about the configMap data to project diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 709bd4d0b9b..6ca6d10cfe0 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -107,14 +107,6 @@ rules: - cloudeventsources/status verbs: - '*' -- apiGroups: - - flowcontrol.apiserver.k8s.io - resources: - - flowschemas - - prioritylevelconfigurations - verbs: - - list - - watch - apiGroups: - keda.sh resources: diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index ea97fa9b337..27625f6f10a 100755 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -65,7 +65,6 @@ import ( // +kubebuilder:rbac:groups="apps",resources=deployments;statefulsets,verbs=list;watch // +kubebuilder:rbac:groups="coordination.k8s.io",namespace=keda,resources=leases,verbs="*" // +kubebuilder:rbac:groups="",resources="limitranges",verbs=list;watch -// +kubebuilder:rbac:groups=flowcontrol.apiserver.k8s.io,resources=prioritylevelconfigurations;flowschemas,verbs=list;watch // ScaledObjectReconciler reconciles a ScaledObject object type ScaledObjectReconciler struct { diff --git a/go.mod b/go.mod index ca6e971c300..64fa2055d41 100644 --- a/go.mod +++ b/go.mod @@ -96,37 +96,38 @@ require ( google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/protobuf v1.33.0 - k8s.io/api v0.29.4 - k8s.io/apimachinery v0.29.4 - k8s.io/apiserver v0.29.4 + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/apiserver v0.29.2 k8s.io/client-go v1.5.2 - k8s.io/code-generator v0.29.4 - k8s.io/component-base v0.29.4 + k8s.io/code-generator v0.29.2 + k8s.io/component-base v0.29.2 k8s.io/klog/v2 v2.120.1 - k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec - k8s.io/metrics v0.29.4 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/metrics v0.28.9 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 knative.dev/pkg v0.0.0-20240423132823-3c6badc82748 sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202 sigs.k8s.io/controller-tools v0.14.0 - sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240424182356-4f8ac354df3b + sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1 sigs.k8s.io/kustomize/kustomize/v5 v5.4.1 ) replace ( - // pin k8s.io to v0.29.4 - github.com/google/cel-go => github.com/google/cel-go v0.17.7 + // pin k8s.io to v0.28.9 + github.com/google/cel-go => github.com/google/cel-go v0.16.1 github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 github.com/prometheus/client_model => github.com/prometheus/client_model v0.4.0 github.com/prometheus/common => github.com/prometheus/common v0.44.0 - k8s.io/api => k8s.io/api v0.29.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 - k8s.io/apiserver => k8s.io/apiserver v0.29.4 - k8s.io/client-go => k8s.io/client-go v0.29.4 - k8s.io/code-generator => k8s.io/code-generator v0.29.4 - k8s.io/component-base => k8s.io/component-base v0.29.4 - k8s.io/metrics => k8s.io/metrics v0.29.4 + k8s.io/api => k8s.io/api v0.28.9 + k8s.io/apimachinery => k8s.io/apimachinery v0.28.9 + k8s.io/apiserver => k8s.io/apiserver v0.28.9 + k8s.io/client-go => k8s.io/client-go v0.28.9 + k8s.io/code-generator => k8s.io/code-generator v0.28.9 + k8s.io/component-base => k8s.io/component-base v0.28.9 + k8s.io/kube-openapi => 
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 + k8s.io/metrics => k8s.io/metrics v0.28.9 ) replace ( @@ -247,7 +248,6 @@ require ( github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.3 // indirect - github.com/gorilla/websocket v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect @@ -297,7 +297,6 @@ require ( github.com/montanaflynn/stats v0.7.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/oapi-codegen/runtime v1.1.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect @@ -363,7 +362,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 k8s.io/apiextensions-apiserver v0.29.2 // indirect k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect - k8s.io/kms v0.29.4 // indirect + k8s.io/kms v0.29.2 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.1 // indirect diff --git a/go.sum b/go.sum index cc5ecbb3f65..5a79728635c 100644 --- a/go.sum +++ b/go.sum @@ -1783,8 +1783,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= +github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo= +github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -2129,8 +2129,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/newrelic/newrelic-client-go v1.1.0 
h1:aflNjzQ21c+2GwBVh+UbAf9lznkRfCcVABoc5UM4IXw= @@ -3146,33 +3144,33 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= +k8s.io/api v0.28.9 h1:E7VEXXCAlSrp+08zq4zgd+ko6Ttu0Mw+XoXlIkDTVW0= +k8s.io/api v0.28.9/go.mod h1:AnCsDYf3SHjfa8mPG5LGYf+iF4mie+3peLQR51MMCgw= k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q= -k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= -k8s.io/apiserver v0.29.4 h1:wPwGOO58GQOpRiZu59P5eRoDcB7QtV+QBglkRiXwCiM= -k8s.io/apiserver v0.29.4/go.mod h1:VqTF9t98HVfhKZVRohCPezsdUt9u2g3bHKftxGcXoRo= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= -k8s.io/code-generator v0.29.4 h1:8ESudFNbY5/9BzB8KOEFG2uV9Q0AQxkc4mrQESr30Ks= -k8s.io/code-generator v0.29.4/go.mod h1:7TYnI0dYItL2cKuhhgPSuF3WED9uMdELgbVXFfn/joE= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/apimachinery v0.28.9 h1:aXz4Zxsw+Pk4KhBerAtKRxNN1uSMWKfciL/iOdBfXvA= +k8s.io/apimachinery v0.28.9/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o= +k8s.io/apiserver v0.28.9 h1:koPXvgSXRBDxKJQjJGdZNgPsT9lQv6scJJFipd1m86E= +k8s.io/apiserver v0.28.9/go.mod h1:D51I37WBZojJhmLcjNVE4GSVrjiUHP+yq+N5KvKn2wY= +k8s.io/client-go v0.28.9 h1:mmMvejwc/KDjMLmDpyaxkWNzlWRCJ6ht7Qsbsnwn39Y= +k8s.io/client-go v0.28.9/go.mod h1:GFDy3rUNId++WGrr0hRaBrs+y1eZz5JtVZODEalhRMo= +k8s.io/code-generator v0.28.9 h1:NyZt4+equopQNbwjSSpVikB15U4ghmvIaqn+VWd367U= +k8s.io/code-generator v0.28.9/go.mod h1:WiJgVNDFAlT90nq6IOxhZ1gxL2JexbcfAx9ZBsyQ3Do= +k8s.io/component-base v0.28.9 h1:ySM2PR8Z/xaUSG1Akd3yM6dqUezTltI7S5aV41MMuuc= +k8s.io/component-base v0.28.9/go.mod h1:QtWzscEhCKRfHV24/S+11BwWjVxhC6fd3RYoEgZcWFU= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 h1:izq7u3SJBdOAuA5YYe1/PIp9jczrih/jGlKRRt0G7bQ= k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.29.4 h1:cFGEoCLwoXk/eqYZppLZxybCdmEWeRKMCbm9f13IdRQ= -k8s.io/kms v0.29.4/go.mod h1:vWVImKkJd+1BQY4tBwdfSwjQBiLrnbNtHADcDEDQFtk= +k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= +k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= k8s.io/kube-aggregator v0.28.1 h1:rvG4llYnQKHjj6YjjoBPEJxfD1uH0DJwkrJTNKGAaCs= k8s.io/kube-aggregator v0.28.1/go.mod h1:JaLizMe+AECSpO2OmrWVsvnG0V3dX1RpW+Wq/QHbu18= -k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec h1:iGTel2aR8vCZdxJDgmbeY0zrlXy9Qcvyw4R2sB4HLrA= -k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec/go.mod 
h1:Pa1PvrP7ACSkuX6I7KYomY6cmMA0Tx86waBhDUgoKPw= -k8s.io/metrics v0.29.4 h1:06sZ63/Kt9HEb5GP/1y6xbHDz6XkxnHpu949UdXfoXQ= -k8s.io/metrics v0.29.4/go.mod h1:ZN9peB0nLTqPZuwQna8ZUrPFJQ0i8QNH4pqRJopS+9c= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/metrics v0.28.9 h1:3TAJhF1GzYK89bE1RLqDinTXAlCnI8UgciwfpKHzKfg= +k8s.io/metrics v0.28.9/go.mod h1:7Hn16jtdxc2Q6Vm73QK7nF7HiLJvomLgN7lEQs8SONs= k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= knative.dev/pkg v0.0.0-20240423132823-3c6badc82748 h1:0X8ZtnOZqGPjauVLLvOyMaBOMX5BBkvAD1/IuxA61Ys= @@ -3251,8 +3249,8 @@ sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e99 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240201105228-4000e996a202/go.mod h1:TF/lVLWS+JNNaVqJuDDictY2hZSXSsIHCx4FClMvqFg= sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A= sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc= -sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240424182356-4f8ac354df3b h1:Q0xk4G+BNGLyzE7J0s38EE0rDEZVGkjMkHf3z0RjGhQ= -sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240424182356-4f8ac354df3b/go.mod h1:4XXz92s/SEmP3L2nlUu6lMWorxEQXAD39AdL22IQkDA= +sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1 h1:HC57TVRzncE2giocqSMfw/ZSVSO9RxBDd5P6olipw1A= +sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1/go.mod h1:Ior2OoZaYHIYR4J/OETrkLln7LzbMd4906WT6Shvpdw= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.1 h1:MYJBOP/yQ3/5tp4/sf6HiiMfNNyO97LmtnirH9SLNr4= diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index f5323dbc77c..38c2b5da8e6 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -42,7 +42,6 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration - transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -81,14 +80,6 @@ func WithNamespace(namespace string) SharedInformerOption { } } -// WithTransform sets a transform on all informers. -func WithTransform(transform cache.TransformFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.transform = transform - return factory - } -} - // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -193,7 +184,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) - informer.SetTransform(f.transform) f.informers[informerType] = informer return informer diff --git a/pkg/metricsservice/api/metrics.pb.go b/pkg/metricsservice/api/metrics.pb.go index e024c7d5277..900fea41cda 100644 --- a/pkg/metricsservice/api/metrics.pb.go +++ b/pkg/metricsservice/api/metrics.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.33.0 -// protoc v5.26.1 +// protoc v4.23.4 // source: metrics.proto package api diff --git a/pkg/metricsservice/api/metrics_grpc.pb.go b/pkg/metricsservice/api/metrics_grpc.pb.go index 79e23afc5f6..8836b080af0 100644 --- a/pkg/metricsservice/api/metrics_grpc.pb.go +++ b/pkg/metricsservice/api/metrics_grpc.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v5.26.1 +// - protoc v4.23.4 // source: metrics.proto package api diff --git a/pkg/scalers/externalscaler/externalscaler.pb.go b/pkg/scalers/externalscaler/externalscaler.pb.go index 64f98443bf3..dd75944a6a5 100644 --- a/pkg/scalers/externalscaler/externalscaler.pb.go +++ b/pkg/scalers/externalscaler/externalscaler.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.33.0 -// protoc v5.26.1 +// protoc v4.23.4 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go index 9a56ba097d5..e9944342368 100644 --- a/pkg/scalers/externalscaler/externalscaler_grpc.pb.go +++ b/pkg/scalers/externalscaler/externalscaler_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v5.26.1 +// - protoc v4.23.4 // source: externalscaler.proto package externalscaler diff --git a/pkg/scalers/liiklus/LiiklusService.pb.go b/pkg/scalers/liiklus/LiiklusService.pb.go index 68d241e53a3..473d0673f23 100644 --- a/pkg/scalers/liiklus/LiiklusService.pb.go +++ b/pkg/scalers/liiklus/LiiklusService.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.33.0 -// protoc v5.26.1 +// protoc v4.23.4 // source: LiiklusService.proto package liiklus diff --git a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go index 737f6979e7e..0e3731ee10d 100644 --- a/pkg/scalers/liiklus/LiiklusService_grpc.pb.go +++ b/pkg/scalers/liiklus/LiiklusService_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v5.26.1 +// - protoc v4.23.4 // source: LiiklusService.proto package liiklus diff --git a/pkg/util/welcome.go b/pkg/util/welcome.go index 4a4e6729d32..680894eb177 100644 --- a/pkg/util/welcome.go +++ b/pkg/util/welcome.go @@ -26,8 +26,8 @@ import ( ) const ( - minSupportedVersion = 28 - maxSupportedVersion = 30 + minSupportedVersion = 27 + maxSupportedVersion = 29 ) func PrintWelcome(logger logr.Logger, kubeVersion K8sVersion, component string) { diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel index 0905f635395..4331321139e 100644 --- a/vendor/github.com/google/cel-go/cel/BUILD.bazel +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -15,7 +15,6 @@ go_library( "macro.go", "options.go", "program.go", - "validator.go", ], importpath = "github.com/google/cel-go/cel", visibility = ["//visibility:public"], @@ -23,18 +22,15 @@ go_library( "//checker:go_default_library", "//checker/decls:go_default_library", "//common:go_default_library", - "//common/ast:go_default_library", "//common/containers:go_default_library", - "//common/decls:go_default_library", - "//common/functions:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", - "//common/stdlib:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "//interpreter:go_default_library", + "//interpreter/functions:go_default_library", "//parser:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", @@ -76,8 +72,6 @@ go_test( "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", - "@org_golang_google_protobuf//encoding/prototext:go_default_library", "@org_golang_google_protobuf//types/known/structpb:go_default_library", - "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go index 0f9501341b4..c0624d1e596 100644 --- a/vendor/github.com/google/cel-go/cel/decls.go +++ b/vendor/github.com/google/cel-go/cel/decls.go @@ -16,133 +16,341 @@ package cel import ( "fmt" + "strings" - "github.com/google/cel-go/common/ast" - "github.com/google/cel-go/common/decls" - "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types. -type Kind = types.Kind +type Kind uint const ( // DynKind represents a dynamic type. This kind only exists at type-check time. - DynKind Kind = types.DynKind + DynKind Kind = iota // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. - AnyKind = types.AnyKind + AnyKind // BoolKind represents a boolean type. - BoolKind = types.BoolKind + BoolKind // BytesKind represents a bytes type. - BytesKind = types.BytesKind + BytesKind // DoubleKind represents a double type. 
- DoubleKind = types.DoubleKind + DoubleKind // DurationKind represents a CEL duration type. - DurationKind = types.DurationKind + DurationKind // IntKind represents an integer type. - IntKind = types.IntKind + IntKind // ListKind represents a list type. - ListKind = types.ListKind + ListKind // MapKind represents a map type. - MapKind = types.MapKind + MapKind // NullTypeKind represents a null type. - NullTypeKind = types.NullTypeKind + NullTypeKind // OpaqueKind represents an abstract type which has no accessible fields. - OpaqueKind = types.OpaqueKind + OpaqueKind // StringKind represents a string type. - StringKind = types.StringKind + StringKind // StructKind represents a structured object with typed fields. - StructKind = types.StructKind + StructKind // TimestampKind represents a a CEL time type. - TimestampKind = types.TimestampKind + TimestampKind // TypeKind represents the CEL type. - TypeKind = types.TypeKind + TypeKind // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible. - TypeParamKind = types.TypeParamKind + TypeParamKind // UintKind represents a uint type. - UintKind = types.UintKind + UintKind ) var ( // AnyType represents the google.protobuf.Any type. - AnyType = types.AnyType + AnyType = &Type{ + kind: AnyKind, + runtimeType: types.NewTypeValue("google.protobuf.Any"), + } // BoolType represents the bool type. - BoolType = types.BoolType + BoolType = &Type{ + kind: BoolKind, + runtimeType: types.BoolType, + } // BytesType represents the bytes type. - BytesType = types.BytesType + BytesType = &Type{ + kind: BytesKind, + runtimeType: types.BytesType, + } // DoubleType represents the double type. - DoubleType = types.DoubleType + DoubleType = &Type{ + kind: DoubleKind, + runtimeType: types.DoubleType, + } // DurationType represents the CEL duration type. - DurationType = types.DurationType + DurationType = &Type{ + kind: DurationKind, + runtimeType: types.DurationType, + } // DynType represents a dynamic CEL type whose type will be determined at runtime from context. - DynType = types.DynType + DynType = &Type{ + kind: DynKind, + runtimeType: types.NewTypeValue("dyn"), + } // IntType represents the int type. - IntType = types.IntType + IntType = &Type{ + kind: IntKind, + runtimeType: types.IntType, + } // NullType represents the type of a null value. - NullType = types.NullType + NullType = &Type{ + kind: NullTypeKind, + runtimeType: types.NullType, + } // StringType represents the string type. - StringType = types.StringType + StringType = &Type{ + kind: StringKind, + runtimeType: types.StringType, + } // TimestampType represents the time type. - TimestampType = types.TimestampType + TimestampType = &Type{ + kind: TimestampKind, + runtimeType: types.TimestampType, + } // TypeType represents a CEL type - TypeType = types.TypeType + TypeType = &Type{ + kind: TypeKind, + runtimeType: types.TypeType, + } // UintType represents a uint type. - UintType = types.UintType - - // function references for instantiating new types. - - // ListType creates an instances of a list type value with the provided element type. - ListType = types.NewListType - // MapType creates an instance of a map type value with the provided key and value types. - MapType = types.NewMapType - // NullableType creates an instance of a nullable type with the provided wrapped type. - // - // Note: only primitive types are supported as wrapped types. 
- NullableType = types.NewNullableType - // OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. - OptionalType = types.NewOptionalType - // OpaqueType creates an abstract parameterized type with a given name. - OpaqueType = types.NewOpaqueType - // ObjectType creates a type references to an externally defined type, e.g. a protobuf message type. - ObjectType = types.NewObjectType - // TypeParamType creates a parameterized type instance. - TypeParamType = types.NewTypeParamType + UintType = &Type{ + kind: UintKind, + runtimeType: types.UintType, + } ) // Type holds a reference to a runtime type with an optional type-checked set of type parameters. -type Type = types.Type +type Type struct { + // kind indicates general category of the type. + kind Kind -// Constant creates an instances of an identifier declaration with a variable name, type, and value. -func Constant(name string, t *Type, v ref.Val) EnvOption { - return func(e *Env) (*Env, error) { - e.variables = append(e.variables, decls.NewConstant(name, t, v)) - return e, nil + // runtimeType is the runtime type of the declaration. + runtimeType ref.Type + + // parameters holds the optional type-checked set of type parameters that are used during static analysis. + parameters []*Type + + // isAssignableType function determines whether one type is assignable to this type. + // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. + isAssignableType func(other *Type) bool + + // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. + // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. + isAssignableRuntimeType func(other ref.Val) bool +} + +// IsAssignableType determines whether the current type is type-check assignable from the input fromType. +func (t *Type) IsAssignableType(fromType *Type) bool { + if t.isAssignableType != nil { + return t.isAssignableType(fromType) + } + return t.defaultIsAssignableType(fromType) +} + +// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. +// +// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) +// will have a runtime assignable type of a map. +func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { + if t.isAssignableRuntimeType != nil { + return t.isAssignableRuntimeType(val) + } + return t.defaultIsAssignableRuntimeType(val) +} + +// String returns a human-readable definition of the type name. +func (t *Type) String() string { + if len(t.parameters) == 0 { + return t.runtimeType.TypeName() + } + params := make([]string, len(t.parameters)) + for i, p := range t.parameters { + params[i] = p.String() + } + return fmt.Sprintf("%s(%s)", t.runtimeType.TypeName(), strings.Join(params, ", ")) +} + +// isDyn indicates whether the type is dynamic in any way. +func (t *Type) isDyn() bool { + return t.kind == DynKind || t.kind == AnyKind || t.kind == TypeParamKind +} + +// equals indicates whether two types have the same kind, type name, and parameters. 
+func (t *Type) equals(other *Type) bool { + if t.kind != other.kind || + t.runtimeType.TypeName() != other.runtimeType.TypeName() || + len(t.parameters) != len(other.parameters) { + return false + } + for i, p := range t.parameters { + if !p.equals(other.parameters[i]) { + return false + } + } + return true +} + +// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another +// where any of the following may return a true result: +// - The from types are the same instance +// - The target type is dynamic +// - The fromType has the same kind and type name as the target type, and all parameters of the target type +// +// are IsAssignableType() from the parameters of the fromType. +func (t *Type) defaultIsAssignableType(fromType *Type) bool { + if t == fromType || t.isDyn() { + return true + } + if t.kind != fromType.kind || + t.runtimeType.TypeName() != fromType.runtimeType.TypeName() || + len(t.parameters) != len(fromType.parameters) { + return false + } + for i, tp := range t.parameters { + fp := fromType.parameters[i] + if !tp.IsAssignableType(fp) { + return false + } + } + return true +} + +// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types +// to determine whether a ref.Val is assignable to the declared type for a function signature. +func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool { + valType := val.Type() + if !(t.runtimeType == valType || t.isDyn() || t.runtimeType.TypeName() == valType.TypeName()) { + return false + } + switch t.runtimeType { + case types.ListType: + elemType := t.parameters[0] + l := val.(traits.Lister) + if l.Size() == types.IntZero { + return true + } + it := l.Iterator() + for it.HasNext() == types.True { + elemVal := it.Next() + return elemType.IsAssignableRuntimeType(elemVal) + } + case types.MapType: + keyType := t.parameters[0] + elemType := t.parameters[1] + m := val.(traits.Mapper) + if m.Size() == types.IntZero { + return true + } + it := m.Iterator() + for it.HasNext() == types.True { + keyVal := it.Next() + elemVal := m.Get(keyVal) + return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal) + } + } + return true +} + +// ListType creates an instances of a list type value with the provided element type. +func ListType(elemType *Type) *Type { + return &Type{ + kind: ListKind, + runtimeType: types.ListType, + parameters: []*Type{elemType}, + } +} + +// MapType creates an instance of a map type value with the provided key and value types. +func MapType(keyType, valueType *Type) *Type { + return &Type{ + kind: MapKind, + runtimeType: types.MapType, + parameters: []*Type{keyType, valueType}, + } +} + +// NullableType creates an instance of a nullable type with the provided wrapped type. +// +// Note: only primitive types are supported as wrapped types. +func NullableType(wrapped *Type) *Type { + return &Type{ + kind: wrapped.kind, + runtimeType: wrapped.runtimeType, + parameters: wrapped.parameters, + isAssignableType: func(other *Type) bool { + return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other) + }, + isAssignableRuntimeType: func(other ref.Val) bool { + return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other) + }, + } +} + +// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. 
+func OptionalType(param *Type) *Type { + return OpaqueType("optional", param) +} + +// OpaqueType creates an abstract parameterized type with a given name. +func OpaqueType(name string, params ...*Type) *Type { + return &Type{ + kind: OpaqueKind, + runtimeType: types.NewTypeValue(name), + parameters: params, + } +} + +// ObjectType creates a type references to an externally defined type, e.g. a protobuf message type. +func ObjectType(typeName string) *Type { + return &Type{ + kind: StructKind, + runtimeType: types.NewObjectTypeValue(typeName), + } +} + +// TypeParamType creates a parameterized type instance. +func TypeParamType(paramName string) *Type { + return &Type{ + kind: TypeParamKind, + runtimeType: types.NewTypeValue(paramName), } } // Variable creates an instance of a variable declaration with a variable name and type. func Variable(name string, t *Type) EnvOption { return func(e *Env) (*Env, error) { - e.variables = append(e.variables, decls.NewVariable(name, t)) + et, err := TypeToExprType(t) + if err != nil { + return nil, err + } + e.declarations = append(e.declarations, decls.NewVar(name, et)) return e, nil } } @@ -178,30 +386,53 @@ func Variable(name string, t *Type) EnvOption { // overload as CEL can only make inferences by type-name regarding such types. func Function(name string, opts ...FunctionOpt) EnvOption { return func(e *Env) (*Env, error) { - fn, err := decls.NewFunction(name, opts...) + fn := &functionDecl{ + name: name, + overloads: []*overloadDecl{}, + options: opts, + } + err := fn.init() + if err != nil { + return nil, err + } + _, err = functionDeclToExprDecl(fn) if err != nil { return nil, err } - if existing, found := e.functions[fn.Name()]; found { - fn, err = existing.Merge(fn) + if existing, found := e.functions[fn.name]; found { + fn, err = existing.merge(fn) if err != nil { return nil, err } } - e.functions[fn.Name()] = fn + e.functions[name] = fn return e, nil } } // FunctionOpt defines a functional option for configuring a function declaration. -type FunctionOpt = decls.FunctionOpt +type FunctionOpt func(*functionDecl) (*functionDecl, error) // SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. // // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { - return decls.SingletonUnaryBinding(fn, traits...) + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *functionDecl) (*functionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) + } + f.singleton = &functions.Overload{ + Operator: f.name, + Unary: fn, + OperandTrait: trait, + } + return f, nil + } } // SingletonBinaryImpl creates a singleton function definition to be used with all function overloads. @@ -211,7 +442,7 @@ func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { // // Deprecated: use SingletonBinaryBinding func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { - return decls.SingletonBinaryBinding(fn, traits...) + return SingletonBinaryBinding(fn, traits...) } // SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. 
@@ -219,7 +450,21 @@ func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { - return decls.SingletonBinaryBinding(fn, traits...) + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *functionDecl) (*functionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) + } + f.singleton = &functions.Overload{ + Operator: f.name, + Binary: fn, + OperandTrait: trait, + } + return f, nil + } } // SingletonFunctionImpl creates a singleton function definition to be used with all function overloads. @@ -229,7 +474,7 @@ func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { // // Deprecated: use SingletonFunctionBinding func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt { - return decls.SingletonFunctionBinding(fn, traits...) + return SingletonFunctionBinding(fn, traits...) } // SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. @@ -237,13 +482,21 @@ func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt { // Note, this approach works well if operand is expected to have a specific trait which it implements, // e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { - return decls.SingletonFunctionBinding(fn, traits...) -} - -// DisableDeclaration disables the function signatures, effectively removing them from the type-check -// environment while preserving the runtime bindings. -func DisableDeclaration(value bool) FunctionOpt { - return decls.DisableDeclaration(value) + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *functionDecl) (*functionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.name) + } + f.singleton = &functions.Overload{ + Operator: f.name, + Function: fn, + OperandTrait: trait, + } + return f, nil + } } // Overload defines a new global overload with an overload id, argument types, and result type. Through the @@ -253,7 +506,7 @@ func DisableDeclaration(value bool) FunctionOpt { // Note: function bindings should be commonly configured with Overload instances whereas operand traits and // strict-ness should be rare occurrences. func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { - return decls.Overload(overloadID, args, resultType, opts...) + return newOverload(overloadID, false, args, resultType, opts...) } // MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, @@ -263,51 +516,609 @@ func Overload(overloadID string, args []*Type, resultType *Type, opts ...Overloa // Note: function bindings should be commonly configured with Overload instances whereas operand traits and // strict-ness should be rare occurrences. func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { - return decls.MemberOverload(overloadID, args, resultType, opts...) + return newOverload(overloadID, true, args, resultType, opts...) 
} // OverloadOpt is a functional option for configuring a function overload. -type OverloadOpt = decls.OverloadOpt +type OverloadOpt func(*overloadDecl) (*overloadDecl, error) // UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func UnaryBinding(binding functions.UnaryOp) OverloadOpt { - return decls.UnaryBinding(binding) + return func(o *overloadDecl) (*overloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.id) + } + if len(o.argTypes) != 1 { + return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.id) + } + o.unaryOp = binding + return o, nil + } } // BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func BinaryBinding(binding functions.BinaryOp) OverloadOpt { - return decls.BinaryBinding(binding) + return func(o *overloadDecl) (*overloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.id) + } + if len(o.argTypes) != 2 { + return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.id) + } + o.binaryOp = binding + return o, nil + } } // FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime // type-guard which ensures runtime type agreement between the overload signature and runtime argument types. func FunctionBinding(binding functions.FunctionOp) OverloadOpt { - return decls.FunctionBinding(binding) + return func(o *overloadDecl) (*overloadDecl, error) { + if o.hasBinding() { + return nil, fmt.Errorf("overload already has a binding: %s", o.id) + } + o.functionOp = binding + return o, nil + } } // OverloadIsNonStrict enables the function to be called with error and unknown argument values. // // Note: do not use this option unless absoluately necessary as it should be an uncommon feature. func OverloadIsNonStrict() OverloadOpt { - return decls.OverloadIsNonStrict() + return func(o *overloadDecl) (*overloadDecl, error) { + o.nonStrict = true + return o, nil + } } // OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be // successfully invoked. func OverloadOperandTrait(trait int) OverloadOpt { - return decls.OverloadOperandTrait(trait) + return func(o *overloadDecl) (*overloadDecl, error) { + o.operandTrait = trait + return o, nil + } +} + +type functionDecl struct { + name string + overloads []*overloadDecl + options []FunctionOpt + singleton *functions.Overload + initialized bool +} + +// init ensures that a function's options have been applied. +// +// This function is used in both the environment configuration and internally for function merges. +func (f *functionDecl) init() error { + if f.initialized { + return nil + } + f.initialized = true + + var err error + for _, opt := range f.options { + f, err = opt(f) + if err != nil { + return err + } + } + if len(f.overloads) == 0 { + return fmt.Errorf("function %s must have at least one overload", f.name) + } + return nil +} + +// bindings produces a set of function bindings, if any are defined. 
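The Singleton*Binding options attach a single implementation to the function as a whole rather than to individual overloads, and bindings() below rejects mixing the two styles. A sketch of that pattern (illustrative only); note the overloads carry no bindings of their own:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	env, err := cel.NewEnv(
		cel.Function("typeName",
			// Declaration-only overloads: adding a per-overload binding here would
			// conflict with the singleton binding attached below.
			cel.Overload("typeName_int", []*cel.Type{cel.IntType}, cel.StringType),
			cel.Overload("typeName_string", []*cel.Type{cel.StringType}, cel.StringType),
			cel.SingletonUnaryBinding(func(arg ref.Val) ref.Val {
				return types.String(arg.Type().TypeName())
			})),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`typeName(42)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, _ := env.Program(ast)
	out, _, _ := prg.Eval(cel.NoVars())
	fmt.Println(out) // int
}
```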
+func (f *functionDecl) bindings() ([]*functions.Overload, error) { + overloads := []*functions.Overload{} + nonStrict := false + for _, o := range f.overloads { + if o.hasBinding() { + overload := &functions.Overload{ + Operator: o.id, + Unary: o.guardedUnaryOp(f.name), + Binary: o.guardedBinaryOp(f.name), + Function: o.guardedFunctionOp(f.name), + OperandTrait: o.operandTrait, + NonStrict: o.nonStrict, + } + overloads = append(overloads, overload) + nonStrict = nonStrict || o.nonStrict + } + } + if f.singleton != nil { + if len(overloads) != 0 { + return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.name) + } + return []*functions.Overload{ + { + Operator: f.name, + Unary: f.singleton.Unary, + Binary: f.singleton.Binary, + Function: f.singleton.Function, + OperandTrait: f.singleton.OperandTrait, + }, + }, nil + } + if len(overloads) == 0 { + return overloads, nil + } + // Single overload. Replicate an entry for it using the function name as well. + if len(overloads) == 1 { + if overloads[0].Operator == f.name { + return overloads, nil + } + return append(overloads, &functions.Overload{ + Operator: f.name, + Unary: overloads[0].Unary, + Binary: overloads[0].Binary, + Function: overloads[0].Function, + NonStrict: overloads[0].NonStrict, + OperandTrait: overloads[0].OperandTrait, + }), nil + } + // All of the defined overloads are wrapped into a top-level function which + // performs dynamic dispatch to the proper overload based on the argument types. + bindings := append([]*functions.Overload{}, overloads...) + funcDispatch := func(args ...ref.Val) ref.Val { + for _, o := range f.overloads { + if !o.matchesRuntimeSignature(args...) { + continue + } + switch len(args) { + case 1: + if o.unaryOp != nil { + return o.unaryOp(args[0]) + } + case 2: + if o.binaryOp != nil { + return o.binaryOp(args[0], args[1]) + } + } + if o.functionOp != nil { + return o.functionOp(args...) + } + // eventually this will fall through to the noSuchOverload below. + } + return noSuchOverload(f.name, args...) + } + function := &functions.Overload{ + Operator: f.name, + Function: funcDispatch, + NonStrict: nonStrict, + } + return append(bindings, function), nil +} + +// merge one function declaration with another. +// +// If a function is extended, by say adding new overloads to an existing function, then it is merged with the +// prior definition of the function at which point its overloads must not collide with pre-existing overloads +// and its bindings (singleton, or per-overload) must not conflict with previous definitions either. +func (f *functionDecl) merge(other *functionDecl) (*functionDecl, error) { + if f.name != other.name { + return nil, fmt.Errorf("cannot merge unrelated functions. 
%s and %s", f.name, other.name) + } + err := f.init() + if err != nil { + return nil, err + } + err = other.init() + if err != nil { + return nil, err + } + merged := &functionDecl{ + name: f.name, + overloads: make([]*overloadDecl, len(f.overloads)), + options: []FunctionOpt{}, + initialized: true, + singleton: f.singleton, + } + copy(merged.overloads, f.overloads) + for _, o := range other.overloads { + err := merged.addOverload(o) + if err != nil { + return nil, fmt.Errorf("function declaration merge failed: %v", err) + } + } + if other.singleton != nil { + if merged.singleton != nil { + return nil, fmt.Errorf("function already has a binding: %s", f.name) + } + merged.singleton = other.singleton + } + return merged, nil +} + +// addOverload ensures that the new overload does not collide with an existing overload signature; +// however, if the function signatures are identical, the implementation may be rewritten as its +// difficult to compare functions by object identity. +func (f *functionDecl) addOverload(overload *overloadDecl) error { + for index, o := range f.overloads { + if o.id != overload.id && o.signatureOverlaps(overload) { + return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.name, o.id, overload.id) + } + if o.id == overload.id { + if o.signatureEquals(overload) && o.nonStrict == overload.nonStrict { + // Allow redefinition of an overload implementation so long as the signatures match. + f.overloads[index] = overload + return nil + } + return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.name, o.id) + } + } + f.overloads = append(f.overloads, overload) + return nil +} + +func noSuchOverload(funcName string, args ...ref.Val) ref.Val { + argTypes := make([]string, len(args)) + for i, arg := range args { + argTypes[i] = arg.Type().TypeName() + } + signature := strings.Join(argTypes, ", ") + return types.NewErr("no such overload: %s(%s)", funcName, signature) +} + +// overloadDecl contains all of the relevant information regarding a specific function overload. +type overloadDecl struct { + id string + argTypes []*Type + resultType *Type + memberFunction bool + + // binding options, optional but encouraged. + unaryOp functions.UnaryOp + binaryOp functions.BinaryOp + functionOp functions.FunctionOp + + // behavioral options, uncommon + nonStrict bool + operandTrait int +} + +func (o *overloadDecl) hasBinding() bool { + return o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil +} + +// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined. +func (o *overloadDecl) guardedUnaryOp(funcName string) functions.UnaryOp { + if o.unaryOp == nil { + return nil + } + return func(arg ref.Val) ref.Val { + if !o.matchesRuntimeUnarySignature(arg) { + return noSuchOverload(funcName, arg) + } + return o.unaryOp(arg) + } +} + +// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined. +func (o *overloadDecl) guardedBinaryOp(funcName string) functions.BinaryOp { + if o.binaryOp == nil { + return nil + } + return func(arg1, arg2 ref.Val) ref.Val { + if !o.matchesRuntimeBinarySignature(arg1, arg2) { + return noSuchOverload(funcName, arg1, arg2) + } + return o.binaryOp(arg1, arg2) + } +} + +// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided. 
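merge and addOverload above are what allow an extended environment to add overloads to an already-declared function: Function() finds the existing declaration and merges it instead of replacing it. An illustrative sketch using Env.Extend; the overload IDs differ and the signatures do not overlap, so the merge succeeds:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	base, err := cel.NewEnv(
		cel.Function("twice",
			cel.Overload("twice_int", []*cel.Type{cel.IntType}, cel.IntType,
				cel.UnaryBinding(func(v ref.Val) ref.Val { return v.(types.Int) * 2 }))),
	)
	if err != nil {
		panic(err)
	}
	// Extend re-declares "twice"; the string overload is merged into the existing
	// function declaration rather than shadowing it.
	ext, err := base.Extend(
		cel.Function("twice",
			cel.Overload("twice_string", []*cel.Type{cel.StringType}, cel.StringType,
				cel.UnaryBinding(func(v ref.Val) ref.Val { s := v.(types.String); return s + s }))),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := ext.Compile(`twice("ab") + twice("cd")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, _ := ext.Program(ast)
	out, _, _ := prg.Eval(cel.NoVars())
	fmt.Println(out) // ababcdcd
}
```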
+func (o *overloadDecl) guardedFunctionOp(funcName string) functions.FunctionOp { + if o.functionOp == nil { + return nil + } + return func(args ...ref.Val) ref.Val { + if !o.matchesRuntimeSignature(args...) { + return noSuchOverload(funcName, args...) + } + return o.functionOp(args...) + } +} + +// matchesRuntimeUnarySignature indicates whether the argument type is runtime assiganble to the overload's expected argument. +func (o *overloadDecl) matchesRuntimeUnarySignature(arg ref.Val) bool { + if o.nonStrict && types.IsUnknownOrError(arg) { + return true + } + return o.argTypes[0].IsAssignableRuntimeType(arg) && (o.operandTrait == 0 || arg.Type().HasTrait(o.operandTrait)) +} + +// matchesRuntimeBinarySignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. +func (o *overloadDecl) matchesRuntimeBinarySignature(arg1, arg2 ref.Val) bool { + if o.nonStrict { + if types.IsUnknownOrError(arg1) { + return types.IsUnknownOrError(arg2) || o.argTypes[1].IsAssignableRuntimeType(arg2) + } + } else if !o.argTypes[1].IsAssignableRuntimeType(arg2) { + return false + } + return o.argTypes[0].IsAssignableRuntimeType(arg1) && (o.operandTrait == 0 || arg1.Type().HasTrait(o.operandTrait)) +} + +// matchesRuntimeSignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. +func (o *overloadDecl) matchesRuntimeSignature(args ...ref.Val) bool { + if len(args) != len(o.argTypes) { + return false + } + if len(args) == 0 { + return true + } + allArgsMatch := true + for i, arg := range args { + if o.nonStrict && types.IsUnknownOrError(arg) { + continue + } + allArgsMatch = allArgsMatch && o.argTypes[i].IsAssignableRuntimeType(arg) + } + + arg := args[0] + return allArgsMatch && (o.operandTrait == 0 || (o.nonStrict && types.IsUnknownOrError(arg)) || arg.Type().HasTrait(o.operandTrait)) +} + +// signatureEquals indicates whether one overload has an identical signature to another overload. +// +// Providing a duplicate signature is not an issue, but an overloapping signature is problematic. +func (o *overloadDecl) signatureEquals(other *overloadDecl) bool { + if o.id != other.id || o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) { + return false + } + for i, at := range o.argTypes { + oat := other.argTypes[i] + if !at.equals(oat) { + return false + } + } + return o.resultType.equals(other.resultType) +} + +// signatureOverlaps indicates whether one overload has an overlapping signature with another overload. +// +// The 'other' overload must first be checked for equality before determining whether it overlaps in order to be completely accurate. 
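The matchesRuntime* helpers back the per-overload invocation guards: an argument that passes the type-checker as dyn can still be rejected when the call is evaluated. A sketch of that failure mode (illustrative only; the exact error text comes from noSuchOverload above):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.DynType),
		cel.Function("twice",
			cel.Overload("twice_int", []*cel.Type{cel.IntType}, cel.IntType,
				cel.UnaryBinding(func(v ref.Val) ref.Val { return v.(types.Int) * 2 }))),
	)
	if err != nil {
		panic(err)
	}
	// Type-checks because x is dyn, so the int overload may apply at compile time.
	ast, iss := env.Compile(`twice(x)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, _ := env.Program(ast)

	// At evaluation time the runtime guard rejects a string argument.
	_, _, err = prg.Eval(map[string]interface{}{"x": "oops"})
	fmt.Println(err) // an overload-mismatch error, e.g. "no such overload: twice(string)"
}
```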
+func (o *overloadDecl) signatureOverlaps(other *overloadDecl) bool { + if o.memberFunction != other.memberFunction || len(o.argTypes) != len(other.argTypes) { + return false + } + argsOverlap := true + for i, argType := range o.argTypes { + otherArgType := other.argTypes[i] + argsOverlap = argsOverlap && + (argType.IsAssignableType(otherArgType) || + otherArgType.IsAssignableType(argType)) + } + return argsOverlap +} + +func newOverload(overloadID string, memberFunction bool, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { + return func(f *functionDecl) (*functionDecl, error) { + overload := &overloadDecl{ + id: overloadID, + argTypes: args, + resultType: resultType, + memberFunction: memberFunction, + } + var err error + for _, opt := range opts { + overload, err = opt(overload) + if err != nil { + return nil, err + } + } + err = f.addOverload(overload) + if err != nil { + return nil, err + } + return f, nil + } +} + +func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { + if t.IsAssignableType(NullType) { + return decls.NewWrapperType(pbType) + } + return pbType } // TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. func TypeToExprType(t *Type) (*exprpb.Type, error) { - return types.TypeToExprType(t) + switch t.kind { + case AnyKind: + return decls.Any, nil + case BoolKind: + return maybeWrapper(t, decls.Bool), nil + case BytesKind: + return maybeWrapper(t, decls.Bytes), nil + case DoubleKind: + return maybeWrapper(t, decls.Double), nil + case DurationKind: + return decls.Duration, nil + case DynKind: + return decls.Dyn, nil + case IntKind: + return maybeWrapper(t, decls.Int), nil + case ListKind: + et, err := TypeToExprType(t.parameters[0]) + if err != nil { + return nil, err + } + return decls.NewListType(et), nil + case MapKind: + kt, err := TypeToExprType(t.parameters[0]) + if err != nil { + return nil, err + } + vt, err := TypeToExprType(t.parameters[1]) + if err != nil { + return nil, err + } + return decls.NewMapType(kt, vt), nil + case NullTypeKind: + return decls.Null, nil + case OpaqueKind: + params := make([]*exprpb.Type, len(t.parameters)) + for i, p := range t.parameters { + pt, err := TypeToExprType(p) + if err != nil { + return nil, err + } + params[i] = pt + } + return decls.NewAbstractType(t.runtimeType.TypeName(), params...), nil + case StringKind: + return maybeWrapper(t, decls.String), nil + case StructKind: + switch t.runtimeType.TypeName() { + case "google.protobuf.Any": + return decls.Any, nil + case "google.protobuf.Duration": + return decls.Duration, nil + case "google.protobuf.Timestamp": + return decls.Timestamp, nil + case "google.protobuf.Value": + return decls.Dyn, nil + case "google.protobuf.ListValue": + return decls.NewListType(decls.Dyn), nil + case "google.protobuf.Struct": + return decls.NewMapType(decls.String, decls.Dyn), nil + case "google.protobuf.BoolValue": + return decls.NewWrapperType(decls.Bool), nil + case "google.protobuf.BytesValue": + return decls.NewWrapperType(decls.Bytes), nil + case "google.protobuf.DoubleValue", "google.protobuf.FloatValue": + return decls.NewWrapperType(decls.Double), nil + case "google.protobuf.Int32Value", "google.protobuf.Int64Value": + return decls.NewWrapperType(decls.Int), nil + case "google.protobuf.StringValue": + return decls.NewWrapperType(decls.String), nil + case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value": + return decls.NewWrapperType(decls.Uint), nil + default: + return 
decls.NewObjectType(t.runtimeType.TypeName()), nil + } + case TimestampKind: + return decls.Timestamp, nil + case TypeParamKind: + return decls.NewTypeParamType(t.runtimeType.TypeName()), nil + case TypeKind: + return decls.NewTypeType(decls.Dyn), nil + case UintKind: + return maybeWrapper(t, decls.Uint), nil + } + return nil, fmt.Errorf("missing type conversion to proto: %v", t) } // ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. func ExprTypeToType(t *exprpb.Type) (*Type, error) { - return types.ExprTypeToType(t) + switch t.GetTypeKind().(type) { + case *exprpb.Type_Dyn: + return DynType, nil + case *exprpb.Type_AbstractType_: + paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) + for i, p := range t.GetAbstractType().GetParameterTypes() { + pt, err := ExprTypeToType(p) + if err != nil { + return nil, err + } + paramTypes[i] = pt + } + return OpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil + case *exprpb.Type_ListType_: + et, err := ExprTypeToType(t.GetListType().GetElemType()) + if err != nil { + return nil, err + } + return ListType(et), nil + case *exprpb.Type_MapType_: + kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + if err != nil { + return nil, err + } + vt, err := ExprTypeToType(t.GetMapType().GetValueType()) + if err != nil { + return nil, err + } + return MapType(kt, vt), nil + case *exprpb.Type_MessageType: + switch t.GetMessageType() { + case "google.protobuf.Any": + return AnyType, nil + case "google.protobuf.Duration": + return DurationType, nil + case "google.protobuf.Timestamp": + return TimestampType, nil + case "google.protobuf.Value": + return DynType, nil + case "google.protobuf.ListValue": + return ListType(DynType), nil + case "google.protobuf.Struct": + return MapType(StringType, DynType), nil + case "google.protobuf.BoolValue": + return NullableType(BoolType), nil + case "google.protobuf.BytesValue": + return NullableType(BytesType), nil + case "google.protobuf.DoubleValue", "google.protobuf.FloatValue": + return NullableType(DoubleType), nil + case "google.protobuf.Int32Value", "google.protobuf.Int64Value": + return NullableType(IntType), nil + case "google.protobuf.StringValue": + return NullableType(StringType), nil + case "google.protobuf.UInt32Value", "google.protobuf.UInt64Value": + return NullableType(UintType), nil + default: + return ObjectType(t.GetMessageType()), nil + } + case *exprpb.Type_Null: + return NullType, nil + case *exprpb.Type_Primitive: + switch t.GetPrimitive() { + case exprpb.Type_BOOL: + return BoolType, nil + case exprpb.Type_BYTES: + return BytesType, nil + case exprpb.Type_DOUBLE: + return DoubleType, nil + case exprpb.Type_INT64: + return IntType, nil + case exprpb.Type_STRING: + return StringType, nil + case exprpb.Type_UINT64: + return UintType, nil + default: + return nil, fmt.Errorf("unsupported primitive type: %v", t) + } + case *exprpb.Type_TypeParam: + return TypeParamType(t.GetTypeParam()), nil + case *exprpb.Type_Type: + return TypeType, nil + case *exprpb.Type_WellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return AnyType, nil + case exprpb.Type_DURATION: + return DurationType, nil + case exprpb.Type_TIMESTAMP: + return TimestampType, nil + default: + return nil, fmt.Errorf("unsupported well-known type: %v", t) + } + case *exprpb.Type_Wrapper: + t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) + if err != nil { + return nil, err + } + return NullableType(t), nil + 
default: + return nil, fmt.Errorf("unsupported type: %v", t) + } } // ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function. @@ -319,42 +1130,82 @@ func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { for i, o := range overloads { args := make([]*Type, len(o.GetParams())) for j, p := range o.GetParams() { - a, err := types.ExprTypeToType(p) + a, err := ExprTypeToType(p) if err != nil { return nil, err } args[j] = a } - res, err := types.ExprTypeToType(o.GetResultType()) + res, err := ExprTypeToType(o.GetResultType()) if err != nil { return nil, err } - if o.IsInstanceFunction { - opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res) - } else { - opts[i] = decls.Overload(o.GetOverloadId(), args, res) - } + opts[i] = Overload(o.GetOverloadId(), args, res) } return Function(d.GetName(), opts...), nil case *exprpb.Decl_Ident: - t, err := types.ExprTypeToType(d.GetIdent().GetType()) + t, err := ExprTypeToType(d.GetIdent().GetType()) if err != nil { return nil, err } - if d.GetIdent().GetValue() == nil { - return Variable(d.GetName(), t), nil + return Variable(d.GetName(), t), nil + default: + return nil, fmt.Errorf("unsupported decl: %v", d) + } + +} + +func functionDeclToExprDecl(f *functionDecl) (*exprpb.Decl, error) { + overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) + i := 0 + for _, o := range f.overloads { + paramNames := map[string]struct{}{} + argTypes := make([]*exprpb.Type, len(o.argTypes)) + for j, a := range o.argTypes { + collectParamNames(paramNames, a) + at, err := TypeToExprType(a) + if err != nil { + return nil, err + } + argTypes[j] = at } - val, err := ast.ConstantToVal(d.GetIdent().GetValue()) + collectParamNames(paramNames, o.resultType) + resultType, err := TypeToExprType(o.resultType) if err != nil { return nil, err } - return Constant(d.GetName(), t, val), nil - default: - return nil, fmt.Errorf("unsupported decl: %v", d) + if len(paramNames) == 0 { + if o.memberFunction { + overloads[i] = decls.NewInstanceOverload(o.id, argTypes, resultType) + } else { + overloads[i] = decls.NewOverload(o.id, argTypes, resultType) + } + } else { + params := []string{} + for pn := range paramNames { + params = append(params, pn) + } + if o.memberFunction { + overloads[i] = decls.NewParameterizedInstanceOverload(o.id, argTypes, resultType, params) + } else { + overloads[i] = decls.NewParameterizedOverload(o.id, argTypes, resultType, params) + } + } + i++ + } + return decls.NewFunction(f.name, overloads...), nil +} + +func collectParamNames(paramNames map[string]struct{}, arg *Type) { + if arg.kind == TypeParamKind { + paramNames[arg.runtimeType.TypeName()] = struct{}{} + } + for _, param := range arg.parameters { + collectParamNames(paramNames, param) } } -func typeValueToKind(tv ref.Type) (Kind, error) { +func typeValueToKind(tv *types.TypeValue) (Kind, error) { switch tv { case types.BoolType: return BoolKind, nil diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go index b5c3b4cc556..d9c2ef63f27 100644 --- a/vendor/github.com/google/cel-go/cel/env.go +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -16,14 +16,13 @@ package cel import ( "errors" + "fmt" "sync" "github.com/google/cel-go/checker" - chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common" - celast "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" - 
"github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" @@ -41,8 +40,8 @@ type Ast struct { expr *exprpb.Expr info *exprpb.SourceInfo source Source - refMap map[int64]*celast.ReferenceInfo - typeMap map[int64]*types.Type + refMap map[int64]*exprpb.Reference + typeMap map[int64]*exprpb.Type } // Expr returns the proto serializable instance of the parsed/checked expression. @@ -61,26 +60,21 @@ func (ast *Ast) SourceInfo() *exprpb.SourceInfo { } // ResultType returns the output type of the expression if the Ast has been type-checked, else -// returns chkdecls.Dyn as the parse step cannot infer the type. +// returns decls.Dyn as the parse step cannot infer the type. // // Deprecated: use OutputType func (ast *Ast) ResultType() *exprpb.Type { if !ast.IsChecked() { - return chkdecls.Dyn + return decls.Dyn } - out := ast.OutputType() - t, err := TypeToExprType(out) - if err != nil { - return chkdecls.Dyn - } - return t + return ast.typeMap[ast.expr.GetId()] } // OutputType returns the output type of the expression if the Ast has been type-checked, else // returns cel.DynType as the parse step cannot infer types. func (ast *Ast) OutputType() *Type { - t, found := ast.typeMap[ast.expr.GetId()] - if !found { + t, err := ExprTypeToType(ast.ResultType()) + if err != nil { return DynType } return t @@ -93,33 +87,22 @@ func (ast *Ast) Source() Source { } // FormatType converts a type message into a string representation. -// -// Deprecated: prefer FormatCELType func FormatType(t *exprpb.Type) string { return checker.FormatCheckedType(t) } -// FormatCELType formats a cel.Type value to a string representation. -// -// The type formatting is identical to FormatType. -func FormatCELType(t *Type) string { - return checker.FormatCELType(t) -} - // Env encapsulates the context necessary to perform parsing, type checking, or generation of // evaluable programs for different expressions. type Env struct { Container *containers.Container - variables []*decls.VariableDecl - functions map[string]*decls.FunctionDecl + functions map[string]*functionDecl + declarations []*exprpb.Decl macros []parser.Macro - adapter types.Adapter - provider types.Provider + adapter ref.TypeAdapter + provider ref.TypeProvider features map[int]bool appliedFeatures map[int]bool libraries map[string]bool - validators []ASTValidator - costOptions []checker.CostOption // Internal parser representation prsr *parser.Parser @@ -171,8 +154,8 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { return nil, err } return (&Env{ - variables: []*decls.VariableDecl{}, - functions: map[string]*decls.FunctionDecl{}, + declarations: []*exprpb.Decl{}, + functions: map[string]*functionDecl{}, macros: []parser.Macro{}, Container: containers.DefaultContainer, adapter: registry, @@ -180,20 +163,14 @@ func NewCustomEnv(opts ...EnvOption) (*Env, error) { features: map[int]bool{}, appliedFeatures: map[int]bool{}, libraries: map[string]bool{}, - validators: []ASTValidator{}, progOpts: []ProgramOption{}, - costOptions: []checker.CostOption{}, }).configure(opts) } // Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues. -// If any `ASTValidators` are configured on the environment, they will be applied after a valid -// type-check result. If any issues are detected, the validators will provide them on the -// output Issues object. 
// -// Either checking or validation has failed if the returned Issues value and its Issues.Err() -// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a -// fatal error. +// Checking has failed if the returned Issues value and its Issues.Err() value are non-nil. +// Issues should be inspected if they are non-nil, but may not represent a fatal error. // // It is possible to have both non-nil Ast and Issues values returned from this call: however, // the mere presence of an Ast does not imply that it is valid for use. @@ -206,38 +183,21 @@ func (e *Env) Check(ast *Ast) (*Ast, *Issues) { if err != nil { errs := common.NewErrors(ast.Source()) errs.ReportError(common.NoLocation, err.Error()) - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssues(errs) } res, errs := checker.Check(pe, ast.Source(), chk) if len(errs.GetErrors()) > 0 { - return nil, NewIssuesWithSourceInfo(errs, ast.SourceInfo()) + return nil, NewIssues(errs) } // Manually create the Ast to ensure that the Ast source information (which may be more // detailed than the information provided by Check), is returned to the caller. - ast = &Ast{ + return &Ast{ source: ast.Source(), - expr: res.Expr, - info: res.SourceInfo, - refMap: res.ReferenceMap, - typeMap: res.TypeMap} - - // Generate a validator configuration from the set of configured validators. - vConfig := newValidatorConfig() - for _, v := range e.validators { - if cv, ok := v.(ASTValidatorConfigurer); ok { - cv.Configure(vConfig) - } - } - // Apply additional validators on the type-checked result. - iss := NewIssuesWithSourceInfo(errs, ast.SourceInfo()) - for _, v := range e.validators { - v.Validate(e, vConfig, res, iss) - } - if iss.Err() != nil { - return nil, iss - } - return ast, nil + expr: res.GetExpr(), + info: res.GetSourceInfo(), + refMap: res.GetReferenceMap(), + typeMap: res.GetTypeMap()}, nil } // Compile combines the Parse and Check phases CEL program compilation to produce an Ast and @@ -295,7 +255,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { copy(chkOptsCopy, e.chkOpts) // Copy the declarations if needed. - varsCopy := []*decls.VariableDecl{} + decsCopy := []*exprpb.Decl{} if chk != nil { // If the type-checker has already been instantiated, then the e.declarations have been // validated within the chk instance. @@ -303,8 +263,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { } else { // If the type-checker has not been instantiated, ensure the unvalidated declarations are // provided to the extended Env instance. - varsCopy = make([]*decls.VariableDecl, len(e.variables)) - copy(varsCopy, e.variables) + decsCopy = make([]*exprpb.Decl, len(e.declarations)) + copy(decsCopy, e.declarations) } // Copy macros and program options @@ -316,8 +276,8 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { // Copy the adapter / provider if they appear to be mutable. adapter := e.adapter provider := e.provider - adapterReg, isAdapterReg := e.adapter.(*types.Registry) - providerReg, isProviderReg := e.provider.(*types.Registry) + adapterReg, isAdapterReg := e.adapter.(ref.TypeRegistry) + providerReg, isProviderReg := e.provider.(ref.TypeRegistry) // In most cases the provider and adapter will be a ref.TypeRegistry; // however, in the rare cases where they are not, they are assumed to // be immutable. 
Since it is possible to set the TypeProvider separately @@ -348,7 +308,7 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { for k, v := range e.appliedFeatures { appliedFeaturesCopy[k] = v } - funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions)) + funcsCopy := make(map[string]*functionDecl, len(e.functions)) for k, v := range e.functions { funcsCopy[k] = v } @@ -356,14 +316,10 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { for k, v := range e.libraries { libsCopy[k] = v } - validatorsCopy := make([]ASTValidator, len(e.validators)) - copy(validatorsCopy, e.validators) - costOptsCopy := make([]checker.CostOption, len(e.costOptions)) - copy(costOptsCopy, e.costOptions) ext := &Env{ Container: e.Container, - variables: varsCopy, + declarations: decsCopy, functions: funcsCopy, macros: macsCopy, progOpts: progOptsCopy, @@ -371,11 +327,9 @@ func (e *Env) Extend(opts ...EnvOption) (*Env, error) { features: featuresCopy, appliedFeatures: appliedFeaturesCopy, libraries: libsCopy, - validators: validatorsCopy, provider: provider, chkOpts: chkOptsCopy, prsrOpts: prsrOptsCopy, - costOptions: costOptsCopy, } return ext.configure(opts) } @@ -393,25 +347,6 @@ func (e *Env) HasLibrary(libName string) bool { return exists && configured } -// Libraries returns a list of SingletonLibrary that have been configured in the environment. -func (e *Env) Libraries() []string { - libraries := make([]string, 0, len(e.libraries)) - for libName := range e.libraries { - libraries = append(libraries, libName) - } - return libraries -} - -// HasValidator returns whether a specific ASTValidator has been configured in the environment. -func (e *Env) HasValidator(name string) bool { - for _, v := range e.validators { - if v.Name() == name { - return true - } - } - return false -} - // Parse parses the input expression value `txt` to a Ast and/or a set of Issues. // // This form of Parse creates a Source value for the input `txt` and forwards to the @@ -453,62 +388,34 @@ func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { return newProgram(e, ast, optSet) } -// CELTypeAdapter returns the `types.Adapter` configured for the environment. -func (e *Env) CELTypeAdapter() types.Adapter { - return e.adapter -} - -// CELTypeProvider returns the `types.Provider` configured for the environment. -func (e *Env) CELTypeProvider() types.Provider { - return e.provider -} - // TypeAdapter returns the `ref.TypeAdapter` configured for the environment. -// -// Deprecated: use CELTypeAdapter() func (e *Env) TypeAdapter() ref.TypeAdapter { return e.adapter } // TypeProvider returns the `ref.TypeProvider` configured for the environment. -// -// Deprecated: use CELTypeProvider() func (e *Env) TypeProvider() ref.TypeProvider { - if legacyProvider, ok := e.provider.(ref.TypeProvider); ok { - return legacyProvider - } - return &interopLegacyTypeProvider{Provider: e.provider} + return e.provider } -// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the -// Env as unknown AttributePattern values. +// UnknownVars returns an interpreter.PartialActivation which marks all variables +// declared in the Env as unknown AttributePattern values. // -// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the -// PartialAttributes option is provided as a ProgramOption. +// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation +// unless the PartialAttributes option is provided as a ProgramOption. 
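UnknownVars pairs with ResidualAst for partial evaluation: evaluate what is known and keep the rest symbolic. A hedged sketch; cel.PartialVars, cel.AttributePattern and the OptTrackState/OptPartialEval eval options come from the wider cel-go API and are assumed here rather than shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.Variable("y", cel.IntType),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x + y == 10`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Partial evaluation needs state tracking plus the partial-eval option.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
	if err != nil {
		panic(err)
	}
	// x is known; y is marked unknown via an attribute pattern.
	vars, err := cel.PartialVars(map[string]interface{}{"x": 4}, cel.AttributePattern("y"))
	if err != nil {
		panic(err)
	}
	out, details, err := prg.Eval(vars)
	if err == nil && types.IsUnknown(out) {
		residual, _ := env.ResidualAst(ast, details)
		src, _ := cel.AstToString(residual)
		fmt.Println(src) // a residual such as: 4 + y == 10
	}
}
```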
func (e *Env) UnknownVars() interpreter.PartialActivation { - act := interpreter.EmptyActivation() - part, _ := PartialVars(act, e.computeUnknownVars(act)...) - return part -} - -// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable -// set, but which have been configured in the environment, are marked as unknown. -// -// The `vars` value may either be an interpreter.Activation or any valid input to the -// interpreter.NewActivation call. -// -// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown -// variables. For more advanced use cases of partial state where portions of an object graph, rather -// than top-level variables, are missing the PartialVars() method may be a more suitable choice. -// -// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the -// PartialAttributes option is provided as a ProgramOption. -func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) { - act, err := interpreter.NewActivation(vars) - if err != nil { - return nil, err + var unknownPatterns []*interpreter.AttributePattern + for _, d := range e.declarations { + switch d.GetDeclKind().(type) { + case *exprpb.Decl_Ident: + unknownPatterns = append(unknownPatterns, + interpreter.NewAttributePattern(d.GetName())) + } } - return PartialVars(act, e.computeUnknownVars(act)...) + part, _ := PartialVars( + interpreter.EmptyActivation(), + unknownPatterns...) + return part } // ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the @@ -556,16 +463,11 @@ func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) { // EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and // extension functions provided by estimator. func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) { - checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, + checked, err := AstToCheckedExpr(ast) + if err != nil { + return checker.CostEstimate{}, fmt.Errorf("EsimateCost could not inspect Ast: %v", err) } - extendedOpts := make([]checker.CostOption, 0, len(e.costOptions)) - extendedOpts = append(extendedOpts, opts...) - extendedOpts = append(extendedOpts, e.costOptions...) - return checker.Cost(checked, estimator, extendedOpts...) + return checker.Cost(checked, estimator, opts...) } // configure applies a series of EnvOptions to the current environment. @@ -586,6 +488,14 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { return nil, err } + // Initialize all of the functions configured within the environment. + for _, fn := range e.functions { + err = fn.init() + if err != nil { + return nil, err + } + } + // Configure the parser. prsrOpts := []parser.Option{} prsrOpts = append(prsrOpts, e.prsrOpts...) @@ -594,9 +504,6 @@ func (e *Env) configure(opts []EnvOption) (*Env, error) { if e.HasFeature(featureEnableMacroCallTracking) { prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true)) } - if e.HasFeature(featureVariadicLogicalASTs) { - prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true)) - } e.prsr, err = parser.NewParser(prsrOpts...) if err != nil { return nil, err @@ -618,6 +525,8 @@ func (e *Env) initChecker() (*checker.Env, error) { chkOpts := []checker.Option{} chkOpts = append(chkOpts, e.chkOpts...) 
chkOpts = append(chkOpts, + checker.HomogeneousAggregateLiterals( + e.HasFeature(featureDisableDynamicAggregateLiterals)), checker.CrossTypeNumericComparisons( e.HasFeature(featureCrossTypeNumericComparisons))) @@ -627,17 +536,19 @@ func (e *Env) initChecker() (*checker.Env, error) { return } // Add the statically configured declarations. - err = ce.AddIdents(e.variables...) + err = ce.Add(e.declarations...) if err != nil { e.setCheckerOrError(nil, err) return } // Add the function declarations which are derived from the FunctionDecl instances. for _, fn := range e.functions { - if fn.IsDeclarationDisabled() { - continue + fnDecl, err := functionDeclToExprDecl(fn) + if err != nil { + e.setCheckerOrError(nil, err) + return } - err = ce.AddFunctions(fn) + err = ce.Add(fnDecl) if err != nil { e.setCheckerOrError(nil, err) return @@ -685,43 +596,17 @@ func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) { return e, nil } -// computeUnknownVars determines a set of missing variables based on the input activation and the -// environment's configured declaration set. -func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern { - var unknownPatterns []*interpreter.AttributePattern - for _, v := range e.variables { - varName := v.Name() - if _, found := vars.ResolveName(varName); found { - continue - } - unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName)) - } - return unknownPatterns -} - -// Error type which references an expression id, a location within source, and a message. -type Error = common.Error - // Issues defines methods for inspecting the error details of parse and check calls. // // Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. type Issues struct { errs *common.Errors - info *exprpb.SourceInfo } // NewIssues returns an Issues struct from a common.Errors object. func NewIssues(errs *common.Errors) *Issues { - return NewIssuesWithSourceInfo(errs, nil) -} - -// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metatata -// which can be used with the `ReportErrorAtID` method for additional error reports within the context -// information that's inferred from an expression id. -func NewIssuesWithSourceInfo(errs *common.Errors, info *exprpb.SourceInfo) *Issues { return &Issues{ errs: errs, - info: info, } } @@ -737,9 +622,9 @@ func (i *Issues) Err() error { } // Errors returns the collection of errors encountered in more granular detail. -func (i *Issues) Errors() []*Error { +func (i *Issues) Errors() []common.Error { if i == nil { - return []*Error{} + return []common.Error{} } return i.errs.GetErrors() } @@ -763,37 +648,6 @@ func (i *Issues) String() string { return i.errs.ToDisplayString() } -// ReportErrorAtID reports an error message with an optional set of formatting arguments. -// -// The source metadata for the expression at `id`, if present, is attached to the error report. -// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo. -func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) { - i.errs.ReportErrorAtID(id, locationByID(id, i.info), message, args...) -} - -// locationByID returns a common.Location given an expression id. 
-// -// TODO: move this functionality into the native SourceInfo and an overhaul of the common.Source -// as this implementation relies on the abstractions present in the protobuf SourceInfo object, -// and is replicated in the checker. -func locationByID(id int64, sourceInfo *exprpb.SourceInfo) common.Location { - positions := sourceInfo.GetPositions() - var line = 1 - if offset, found := positions[id]; found { - col := int(offset) - for _, lineOffset := range sourceInfo.GetLineOffsets() { - if lineOffset < offset { - line++ - col = int(offset - lineOffset) - } else { - break - } - } - return common.NewLocation(line, col) - } - return common.NoLocation -} - // getStdEnv lazy initializes the CEL standard environment. func getStdEnv() (*Env, error) { stdEnvInit.Do(func() { @@ -802,90 +656,6 @@ func getStdEnv() (*Env, error) { return stdEnv, stdEnvErr } -// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider. -type interopCELTypeProvider struct { - ref.TypeProvider -} - -// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists. -// -// This method proxies to the underyling ref.TypeProvider's FindType method and converts protobuf type -// into a native type representation. If the conversion fails, the type is listed as not found. -func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) { - if et, found := p.FindType(typeName); found { - t, err := types.ExprTypeToType(et) - if err != nil { - return nil, false - } - return t, true - } - return nil, false -} - -// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field -// name, if one exists. -// -// This method proxies to the underyling ref.TypeProvider's FindFieldType method and converts protobuf type -// into a native type representation. If the conversion fails, the type is listed as not found. -func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) { - if ft, found := p.FindFieldType(structType, fieldName); found { - t, err := types.ExprTypeToType(ft.Type) - if err != nil { - return nil, false - } - return &types.FieldType{ - Type: t, - IsSet: ft.IsSet, - GetFrom: ft.GetFrom, - }, true - } - return nil, false -} - -// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider. -type interopLegacyTypeProvider struct { - types.Provider -} - -// FindType retruns the protobuf Type representation for the input type name if one exists. -// -// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type -// value to a protobuf Type representation. -// -// Failure to convert the type will result in the type not being found. -func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { - if t, found := p.FindStructType(typeName); found { - et, err := types.TypeToExprType(t) - if err != nil { - return nil, false - } - return et, true - } - return nil, false -} - -// FindFieldType returns the protobuf-based FieldType representation for the input type name and field, -// if one exists. -// -// This call proxies to the types.Provider FindStructFieldType method and converts the types.FIeldType -// value to a protobuf-based ref.FieldType representation if found. -// -// Failure to convert the FieldType will result in the field not being found. 
-func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) { - if cft, found := p.FindStructFieldType(structType, fieldName); found { - et, err := types.TypeToExprType(cft.Type) - if err != nil { - return nil, false - } - return &ref.FieldType{ - Type: et, - IsSet: cft.IsSet, - GetFrom: cft.GetFrom, - }, true - } - return nil, false -} - var ( stdEnvInit sync.Once stdEnv *Env diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go index 80f63140e30..93ded3cf1b7 100644 --- a/vendor/github.com/google/cel-go/cel/io.go +++ b/vendor/github.com/google/cel-go/cel/io.go @@ -22,7 +22,6 @@ import ( "google.golang.org/protobuf/proto" "github.com/google/cel-go/common" - "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" @@ -34,8 +33,7 @@ import ( // CheckedExprToAst converts a checked expression proto message to an Ast. func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { - checked, _ := CheckedExprToAstWithSource(checkedExpr, nil) - return checked + return CheckedExprToAstWithSource(checkedExpr, nil) } // CheckedExprToAstWithSource converts a checked expression proto message to an Ast, @@ -46,18 +44,29 @@ func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { // through future calls. // // Prefer CheckedExprToAst if loading expressions from storage. -func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) { - checkedAST, err := ast.CheckedExprToCheckedAST(checkedExpr) - if err != nil { - return nil, err +func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) *Ast { + refMap := checkedExpr.GetReferenceMap() + if refMap == nil { + refMap = map[int64]*exprpb.Reference{} + } + typeMap := checkedExpr.GetTypeMap() + if typeMap == nil { + typeMap = map[int64]*exprpb.Type{} + } + si := checkedExpr.GetSourceInfo() + if si == nil { + si = &exprpb.SourceInfo{} + } + if src == nil { + src = common.NewInfoSource(si) } return &Ast{ - expr: checkedAST.Expr, - info: checkedAST.SourceInfo, + expr: checkedExpr.GetExpr(), + info: si, source: src, - refMap: checkedAST.ReferenceMap, - typeMap: checkedAST.TypeMap, - }, nil + refMap: refMap, + typeMap: typeMap, + } } // AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. @@ -67,13 +76,12 @@ func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { if !a.IsChecked() { return nil, fmt.Errorf("cannot convert unchecked ast") } - cAst := &ast.CheckedAST{ - Expr: a.expr, - SourceInfo: a.info, + return &exprpb.CheckedExpr{ + Expr: a.Expr(), + SourceInfo: a.SourceInfo(), ReferenceMap: a.refMap, TypeMap: a.typeMap, - } - return ast.CheckedASTToCheckedExpr(cAst) + }, nil } // ParsedExprToAst converts a parsed expression proto message to an Ast. @@ -194,7 +202,7 @@ func RefValueToValue(res ref.Val) (*exprpb.Value, error) { } var ( - typeNameToTypeValue = map[string]ref.Val{ + typeNameToTypeValue = map[string]*types.TypeValue{ "bool": types.BoolType, "bytes": types.BytesType, "double": types.DoubleType, @@ -211,7 +219,7 @@ var ( ) // ValueToRefValue converts between exprpb.Value and ref.Val. 
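AstToCheckedExpr and CheckedExprToAst above make a compiled Ast portable: convert it to the protobuf CheckedExpr once, cache or ship that message, and rebuild a Program later without re-parsing. An illustrative round-trip sketch; persisting the proto (for example with proto.Marshal) is only hinted at in a comment:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("name", cel.StringType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`size(name) > 3`)
	if iss.Err() != nil {
		panic(iss.Err())
	}

	// Convert to the protobuf CheckedExpr form; this is what would be persisted
	// (e.g. proto.Marshal) or sent across a process boundary.
	checked, err := cel.AstToCheckedExpr(ast)
	if err != nil {
		panic(err)
	}

	// Later, or in another process: rebuild the Ast and evaluate without re-compiling.
	restored := cel.CheckedExprToAst(checked)
	prg, err := env.Program(restored)
	if err != nil {
		panic(err)
	}
	out, _, _ := prg.Eval(map[string]interface{}{"name": "scaledobject"})
	fmt.Println(out) // true
}
```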
-func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { +func ValueToRefValue(adapter ref.TypeAdapter, v *exprpb.Value) (ref.Val, error) { switch v.Kind.(type) { case *exprpb.Value_NullValue: return types.NullValue, nil diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go index 4d232085c24..bcfd44f78a9 100644 --- a/vendor/github.com/google/cel-go/cel/library.go +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -15,18 +15,19 @@ package cel import ( - "math" "strconv" "strings" "time" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/stdlib" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -34,7 +35,6 @@ import ( const ( optMapMacro = "optMap" - optFlatMapMacro = "optFlatMap" hasValueFunc = "hasValue" optionalNoneFunc = "optional.none" optionalOfFunc = "optional.of" @@ -106,213 +106,44 @@ func (stdLibrary) LibraryName() string { return "cel.lib.std" } -// CompileOptions returns options for the standard CEL function declarations and macros. +// EnvOptions returns options for the standard CEL function declarations and macros. func (stdLibrary) CompileOptions() []EnvOption { return []EnvOption{ - func(e *Env) (*Env, error) { - var err error - for _, fn := range stdlib.Functions() { - existing, found := e.functions[fn.Name()] - if found { - fn, err = existing.Merge(fn) - if err != nil { - return nil, err - } - } - e.functions[fn.Name()] = fn - } - return e, nil - }, - func(e *Env) (*Env, error) { - e.variables = append(e.variables, stdlib.Types()...) - return e, nil - }, + Declarations(checker.StandardDeclarations()...), Macros(StandardMacros...), } } // ProgramOptions returns function implementations for the standard CEL functions. func (stdLibrary) ProgramOptions() []ProgramOption { - return []ProgramOption{} -} - -// OptionalTypes enable support for optional syntax and types in CEL. -// -// The optional value type makes it possible to express whether variables have -// been provided, whether a result has been computed, and in the future whether -// an object field path, map key value, or list index has a value. -// -// # Syntax Changes -// -// OptionalTypes are unlike other CEL extensions because they modify the CEL -// syntax itself, notably through the use of a `?` preceding a field name or -// index value. -// -// ## Field Selection -// -// The optional syntax in field selection is denoted as `obj.?field`. In other -// words, if a field is set, return `optional.of(obj.field)“, else -// `optional.none()`. The optional field selection is viral in the sense that -// after the first optional selection all subsequent selections or indices -// are treated as optional, i.e. 
the following expressions are equivalent: -// -// obj.?field.subfield -// obj.?field.?subfield -// -// ## Indexing -// -// Similar to field selection, the optional syntax can be used in index -// expressions on maps and lists: -// -// list[?0] -// map[?key] -// -// ## Optional Field Setting -// -// When creating map or message literals, if a field may be optionally set -// based on its presence, then placing a `?` before the field name or key -// will ensure the type on the right-hand side must be optional(T) where T -// is the type of the field or key-value. -// -// The following returns a map with the key expression set only if the -// subfield is present, otherwise an empty map is created: -// -// {?key: obj.?field.subfield} -// -// ## Optional Element Setting -// -// When creating list literals, an element in the list may be optionally added -// when the element expression is preceded by a `?`: -// -// [a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c] -// -// # Optional.Of -// -// Create an optional(T) value of a given value with type T. -// -// optional.of(10) -// -// # Optional.OfNonZeroValue -// -// Create an optional(T) value of a given value with type T if it is not a -// zero-value. A zero-value the default empty value for any given CEL type, -// including empty protobuf message types. If the value is empty, the result -// of this call will be optional.none(). -// -// optional.ofNonZeroValue([1, 2, 3]) // optional(list(int)) -// optional.ofNonZeroValue([]) // optional.none() -// optional.ofNonZeroValue(0) // optional.none() -// optional.ofNonZeroValue("") // optional.none() -// -// # Optional.None -// -// Create an empty optional value. -// -// # HasValue -// -// Determine whether the optional contains a value. -// -// optional.of(b'hello').hasValue() // true -// optional.ofNonZeroValue({}).hasValue() // false -// -// # Value -// -// Get the value contained by the optional. If the optional does not have a -// value, the result will be a CEL error. -// -// optional.of(b'hello').value() // b'hello' -// optional.ofNonZeroValue({}).value() // error -// -// # Or -// -// If the value on the left-hand side is optional.none(), the optional value -// on the right hand side is returned. If the value on the left-hand set is -// valued, then it is returned. This operation is short-circuiting and will -// only evaluate as many links in the `or` chain as are needed to return a -// non-empty optional value. -// -// obj.?field.or(m[?key]) -// l[?index].or(obj.?field.subfield).or(obj.?other) -// -// # OrValue -// -// Either return the value contained within the optional on the left-hand side -// or return the alternative value on the right hand side. -// -// m[?key].orValue("none") -// -// # OptMap -// -// Apply a transformation to the optional's underlying value if it is not empty -// and return an optional typed result based on the transformation. The -// transformation expression type must return a type T which is wrapped into -// an optional. -// -// msg.?elements.optMap(e, e.size()).orValue(0) -// -// # OptFlatMap -// -// Introduced in version: 1 -// -// Apply a transformation to the optional's underlying value if it is not empty -// and return the result. The transform expression must return an optional(T) -// rather than type T. This can be useful when dealing with zero values and -// conditionally generating an empty or non-empty result in ways which cannot -// be expressed with `optMap`. 
-// -// msg.?elements.optFlatMap(e, e[?0]) // return the first element if present. -func OptionalTypes(opts ...OptionalTypesOption) EnvOption { - lib := &optionalLib{version: math.MaxUint32} - for _, opt := range opts { - lib = opt(lib) + return []ProgramOption{ + Functions(functions.StandardOverloads()...), } - return Lib(lib) } -type optionalLib struct { - version uint32 -} - -// OptionalTypesOption is a functional interface for configuring the strings library. -type OptionalTypesOption func(*optionalLib) *optionalLib - -// OptionalTypesVersion configures the version of the optional type library. -// -// The version limits which functions are available. Only functions introduced -// below or equal to the given version included in the library. If this option -// is not set, all functions are available. -// -// See the library documentation to determine which version a function was introduced. -// If the documentation does not state which version a function was introduced, it can -// be assumed to be introduced at version 0, when the library was first created. -func OptionalTypesVersion(version uint32) OptionalTypesOption { - return func(lib *optionalLib) *optionalLib { - lib.version = version - return lib - } -} +type optionalLibrary struct{} // LibraryName implements the SingletonLibrary interface method. -func (lib *optionalLib) LibraryName() string { +func (optionalLibrary) LibraryName() string { return "cel.lib.optional" } // CompileOptions implements the Library interface method. -func (lib *optionalLib) CompileOptions() []EnvOption { +func (optionalLibrary) CompileOptions() []EnvOption { paramTypeK := TypeParamType("K") paramTypeV := TypeParamType("V") optionalTypeV := OptionalType(paramTypeV) listTypeV := ListType(paramTypeV) mapTypeKV := MapType(paramTypeK, paramTypeV) - opts := []EnvOption{ + return []EnvOption{ // Enable the optional syntax in the parser. enableOptionalSyntax(), // Introduce the optional type. Types(types.OptionalType), - // Configure the optMap and optFlatMap macros. + // Configure the optMap macro. Macros(NewReceiverMacro(optMapMacro, 2, optMap)), // Global and member functions for working with optional values. @@ -371,29 +202,21 @@ func (lib *optionalLib) CompileOptions() []EnvOption { // Index overloads to accommodate using an optional value as the operand. Function(operators.Index, Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV), - Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), - } - if lib.version >= 1 { - opts = append(opts, Macros(NewReceiverMacro(optFlatMapMacro, 2, optFlatMap))) - } - return opts -} - -// ProgramOptions implements the Library interface method. 
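Editor's illustrative sketch, not part of the patch: the doc comment removed above describes cel-go's optional-value syntax, and the older release this PR pins still exposes the feature through the argument-less cel.OptionalTypes() option re-added later in options.go. A minimal sketch of enabling it, assuming the pre-0.14 Declarations/decls declaration style this vendor drop uses and a hypothetical map variable "m":

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

func main() {
	env, err := cel.NewEnv(
		cel.OptionalTypes(), // enables `?` selection/indexing plus the optional.* helpers
		cel.Declarations(decls.NewVar("m", decls.NewMapType(decls.String, decls.String))),
	)
	if err != nil {
		panic(err)
	}
	// m.?name is optional.of(m["name"]) when the key exists, optional.none() otherwise.
	ast, iss := env.Compile(`m.?name.orValue("anonymous")`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"m": map[string]string{}})
	fmt.Println(out, err) // "anonymous" <nil>
}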
-func (lib *optionalLib) ProgramOptions() []ProgramOption { - return []ProgramOption{ - CustomDecorator(decorateOptionalOr), + Overload("optional_map_index_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)), } } -func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { varIdent := args[0] varName := "" switch varIdent.GetExprKind().(type) { case *exprpb.Expr_IdentExpr: varName = varIdent.GetIdentExpr().GetName() default: - return nil, meh.NewError(varIdent.GetId(), "optMap() variable name must be a simple identifier") + return nil, &common.Error{ + Message: "optMap() variable name must be a simple identifier", + Location: meh.OffsetLocation(varIdent.GetId()), + } } mapExpr := args[1] return meh.GlobalCall( @@ -414,30 +237,11 @@ func optMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exp ), nil } -func optFlatMap(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { - varIdent := args[0] - varName := "" - switch varIdent.GetExprKind().(type) { - case *exprpb.Expr_IdentExpr: - varName = varIdent.GetIdentExpr().GetName() - default: - return nil, meh.NewError(varIdent.GetId(), "optFlatMap() variable name must be a simple identifier") +// ProgramOptions implements the Library interface method. +func (optionalLibrary) ProgramOptions() []ProgramOption { + return []ProgramOption{ + CustomDecorator(decorateOptionalOr), } - mapExpr := args[1] - return meh.GlobalCall( - operators.Conditional, - meh.ReceiverCall(hasValueFunc, target), - meh.Fold( - unusedIterVar, - meh.NewList(), - varName, - meh.ReceiverCall(valueFunc, target), - meh.LiteralBool(false), - meh.Ident(varName), - mapExpr, - ), - meh.GlobalCall(optionalNoneFunc), - ), nil } func enableOptionalSyntax() EnvOption { @@ -554,16 +358,28 @@ var ( timeOverloadDeclarations = []EnvOption{ Function(overloads.TimeGetHours, MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType, - UnaryBinding(types.DurationGetHours))), + UnaryBinding(func(dur ref.Val) ref.Val { + d := dur.(types.Duration) + return types.Int(d.Hours()) + }))), Function(overloads.TimeGetMinutes, MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType, - UnaryBinding(types.DurationGetMinutes))), + UnaryBinding(func(dur ref.Val) ref.Val { + d := dur.(types.Duration) + return types.Int(d.Minutes()) + }))), Function(overloads.TimeGetSeconds, MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType, - UnaryBinding(types.DurationGetSeconds))), + UnaryBinding(func(dur ref.Val) ref.Val { + d := dur.(types.Duration) + return types.Int(d.Seconds()) + }))), Function(overloads.TimeGetMilliseconds, MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType, - UnaryBinding(types.DurationGetMilliseconds))), + UnaryBinding(func(dur ref.Val) ref.Val { + d := dur.(types.Duration) + return types.Int(d.Milliseconds()) + }))), Function(overloads.TimeGetFullYear, MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType, UnaryBinding(func(ts ref.Val) ref.Val { diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go index 1eb414c8be4..e48c5bf8eed 100644 --- a/vendor/github.com/google/cel-go/cel/macro.go +++ b/vendor/github.com/google/cel-go/cel/macro.go @@ -15,6 +15,7 @@ package cel import ( + "github.com/google/cel-go/common" 
"github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -62,21 +63,21 @@ func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro { } // HasMacroExpander expands the input call arguments into a presence test, e.g. has(.field) -func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { return parser.MakeHas(meh, target, args) } // ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the // elements in the range match the predicate expressions: // .exists(, ) -func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { return parser.MakeExists(meh, target, args) } // ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly // one of the elements in the range match the predicate expressions: // .exists_one(, ) -func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { return parser.MakeExistsOne(meh, target, args) } @@ -90,14 +91,14 @@ func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*ex // // In the second form only iterVar values which return true when provided to the predicate expression // are transformed. -func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { return parser.MakeMap(meh, target, args) } // FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains // only elements which match the provided predicate expression: // .filter(, ) -func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) { +func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { return parser.MakeFilter(meh, target, args) } diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go index 05867730d36..07f3d6c7161 100644 --- a/vendor/github.com/google/cel-go/cel/options.go +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -23,13 +23,12 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/types/dynamicpb" - "github.com/google/cel-go/checker" + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/functions" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/interpreter/functions" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -42,6 +41,13 @@ import ( const ( _ = iota + // Disallow heterogeneous aggregate (list, map) literals. 
+ // Note, it is still possible to have heterogeneous aggregates when + // provided as variables to the expression, as well as via conversion + // of well-known dynamic types, or with unchecked expressions. + // Affects checking. Provides a subset of standard behavior. + featureDisableDynamicAggregateLiterals + // Enable the tracking of function call expressions replaced by macros. featureEnableMacroCallTracking @@ -57,10 +63,9 @@ const ( // is not already in UTC. featureDefaultUTCTimeZone - // Enable the serialization of logical operator ASTs as variadic calls, thus - // compressing the logic graph to a single call when multiple like-operator - // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) - featureVariadicLogicalASTs + // Enable the use of optional types in the syntax, type-system, type-checking, + // and runtime. + featureOptionalTypes ) // EnvOption is a functional interface for configuring the environment. @@ -77,26 +82,23 @@ func ClearMacros() EnvOption { } } -// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one. +// CustomTypeAdapter swaps the default ref.TypeAdapter implementation with a custom one. // // Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeAdapter(adapter types.Adapter) EnvOption { +func CustomTypeAdapter(adapter ref.TypeAdapter) EnvOption { return func(e *Env) (*Env, error) { e.adapter = adapter return e, nil } } -// CustomTypeProvider replaces the types.Provider implementation with a custom one. -// -// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated) +// CustomTypeProvider swaps the default ref.TypeProvider implementation with a custom one. // // Note: This option must be specified before the Types and TypeDescs options when used together. -func CustomTypeProvider(provider any) EnvOption { +func CustomTypeProvider(provider ref.TypeProvider) EnvOption { return func(e *Env) (*Env, error) { - var err error - e.provider, err = maybeInteropProvider(provider) - return e, err + e.provider = provider + return e, nil } } @@ -106,28 +108,8 @@ func CustomTypeProvider(provider any) EnvOption { // for the environment. The NewEnv call builds on top of the standard CEL declarations. For a // purely custom set of declarations use NewCustomEnv. func Declarations(decls ...*exprpb.Decl) EnvOption { - declOpts := []EnvOption{} - var err error - var opt EnvOption - // Convert the declarations to `EnvOption` values ahead of time. - // Surface any errors in conversion when the options are applied. - for _, d := range decls { - opt, err = ExprDeclToDeclaration(d) - if err != nil { - break - } - declOpts = append(declOpts, opt) - } return func(e *Env) (*Env, error) { - if err != nil { - return nil, err - } - for _, o := range declOpts { - e, err = o(e) - if err != nil { - return nil, err - } - } + e.declarations = append(e.declarations, decls...) return e, nil } } @@ -144,25 +126,14 @@ func EagerlyValidateDeclarations(enabled bool) EnvOption { return features(featureEagerlyValidateDeclarations, enabled) } -// HomogeneousAggregateLiterals disables mixed type list and map literal values. +// HomogeneousAggregateLiterals option ensures that list and map literal entry types must agree +// during type-checking. // // Note, it is still possible to have heterogeneous aggregates when provided as variables to the // expression, as well as via conversion of well-known dynamic types, or with unchecked // expressions. 
func HomogeneousAggregateLiterals() EnvOption { - return ASTValidators(ValidateHomogeneousAggregateLiterals()) -} - -// variadicLogicalOperatorASTs flatten like-operator chained logical expressions into a single -// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as -// it will reduce the number of recursive calls needed to deserialize the AST later. -// -// For example, given the following expression the call graph will be rendered accordingly: -// -// expression: a && b && c && (d || e) -// ast: call(_&&_, [a, b, c, call(_||_, [d, e])]) -func variadicLogicalOperatorASTs() EnvOption { - return features(featureVariadicLogicalASTs, true) + return features(featureDisableDynamicAggregateLiterals, true) } // Macros option extends the macro set configured in the environment. @@ -255,12 +226,7 @@ func Abbrevs(qualifiedNames ...string) EnvOption { // Note: This option must be specified after the CustomTypeProvider option when used together. func Types(addTypes ...any) EnvOption { return func(e *Env) (*Env, error) { - var reg ref.TypeRegistry - var isReg bool - reg, isReg = e.provider.(*types.Registry) - if !isReg { - reg, isReg = e.provider.(ref.TypeRegistry) - } + reg, isReg := e.provider.(ref.TypeRegistry) if !isReg { return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) } @@ -470,24 +436,6 @@ func InterruptCheckFrequency(checkFrequency uint) ProgramOption { } } -// CostEstimatorOptions configure type-check time options for estimating expression cost. -func CostEstimatorOptions(costOpts ...checker.CostOption) EnvOption { - return func(e *Env) (*Env, error) { - e.costOptions = append(e.costOptions, costOpts...) - return e, nil - } -} - -// CostTrackerOptions configures a set of options for cost-tracking. -// -// Note, CostTrackerOptions is a no-op unless CostTracking is also enabled. -func CostTrackerOptions(costOpts ...interpreter.CostTrackerOption) ProgramOption { - return func(p *prog) (*prog, error) { - p.costOptions = append(p.costOptions, costOpts...) - return p, nil - } -} - // CostTracking enables cost tracking and registers a ActualCostEstimator that can optionally provide a runtime cost estimate for any function calls. 
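Editor's sketch, not part of the patch: with the hunks above, variable declarations go back to the exprpb-based cel.Declarations option and HomogeneousAggregateLiterals becomes a check-time feature flag rather than an AST validator. Typical usage against this older API (the variable name is illustrative only):

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

func main() {
	env, err := cel.NewEnv(
		cel.Declarations(decls.NewVar("labels", decls.NewMapType(decls.String, decls.String))),
		cel.HomogeneousAggregateLiterals(), // mixed-type list/map literals now fail type-checking
	)
	if err != nil {
		panic(err)
	}
	if _, iss := env.Compile(`[1, "two", 3]`); iss.Err() != nil {
		fmt.Println("rejected:", iss.Err()) // heterogeneous list literal is a check-time error
	}
	ast, iss := env.Compile(`labels["app"] == "keda"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(ast.ResultType()) // the checked result type, here BOOL
}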
func CostTracking(costEstimator interpreter.ActualCostEstimator) ProgramOption { return func(p *prog) (*prog, error) { @@ -509,21 +457,25 @@ func CostLimit(costLimit uint64) ProgramOption { } } -func fieldToCELType(field protoreflect.FieldDescriptor) (*Type, error) { +func fieldToCELType(field protoreflect.FieldDescriptor) (*exprpb.Type, error) { if field.Kind() == protoreflect.MessageKind || field.Kind() == protoreflect.GroupKind { msgName := (string)(field.Message().FullName()) - return ObjectType(msgName), nil + wellKnownType, found := pb.CheckedWellKnowns[msgName] + if found { + return wellKnownType, nil + } + return decls.NewObjectType(msgName), nil } - if primitiveType, found := types.ProtoCELPrimitives[field.Kind()]; found { + if primitiveType, found := pb.CheckedPrimitives[field.Kind()]; found { return primitiveType, nil } if field.Kind() == protoreflect.EnumKind { - return IntType, nil + return decls.Int, nil } return nil, fmt.Errorf("field %s type %s not implemented", field.FullName(), field.Kind().String()) } -func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) { +func fieldToDecl(field protoreflect.FieldDescriptor) (*exprpb.Decl, error) { name := string(field.Name()) if field.IsMap() { mapKey := field.MapKey() @@ -536,20 +488,20 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) { if err != nil { return nil, err } - return Variable(name, MapType(keyType, valueType)), nil + return decls.NewVar(name, decls.NewMapType(keyType, valueType)), nil } if field.IsList() { elemType, err := fieldToCELType(field) if err != nil { return nil, err } - return Variable(name, ListType(elemType)), nil + return decls.NewVar(name, decls.NewListType(elemType)), nil } celType, err := fieldToCELType(field) if err != nil { return nil, err } - return Variable(name, celType), nil + return decls.NewVar(name, celType), nil } // DeclareContextProto returns an option to extend CEL environment with declarations from the given context proto. @@ -557,51 +509,23 @@ func fieldToVariable(field protoreflect.FieldDescriptor) (EnvOption, error) { // https://github.com/google/cel-spec/blob/master/doc/langdef.md#evaluation-environment func DeclareContextProto(descriptor protoreflect.MessageDescriptor) EnvOption { return func(e *Env) (*Env, error) { + var decls []*exprpb.Decl fields := descriptor.Fields() for i := 0; i < fields.Len(); i++ { field := fields.Get(i) - variable, err := fieldToVariable(field) - if err != nil { - return nil, err - } - e, err = variable(e) + decl, err := fieldToDecl(field) if err != nil { return nil, err } + decls = append(decls, decl) } - return Types(dynamicpb.NewMessage(descriptor))(e) - } -} - -// ContextProtoVars uses the fields of the input proto.Messages as top-level variables within an Activation. -// -// Consider using with `DeclareContextProto` to simplify variable type declarations and publishing when using -// protocol buffers. 
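Editor's sketch, not part of the patch: CostLimit and CostTracking above attach a cost budget and, optionally, a custom ActualCostEstimator to a Program, while EvalDetails.ActualCost in the next hunk reads back what an evaluation spent. The sketch assumes the default cost model and the OptTrackCost eval option; names are illustrative only:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/checker/decls"
)

func main() {
	env, err := cel.NewEnv(
		cel.Declarations(decls.NewVar("items", decls.NewListType(decls.String))),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`items.filter(i, i.startsWith("keda-")).size()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast,
		cel.EvalOptions(cel.OptTrackCost), // track runtime cost per evaluation
		cel.CostLimit(1000),               // abort evaluation once the budget is exceeded
	)
	if err != nil {
		panic(err)
	}
	out, det, err := prg.Eval(map[string]any{"items": []string{"keda-operator", "other"}})
	fmt.Println(out, err)
	if det != nil && det.ActualCost() != nil {
		fmt.Println("actual cost:", *det.ActualCost())
	}
}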
-func ContextProtoVars(ctx proto.Message) (interpreter.Activation, error) { - if ctx == nil || !ctx.ProtoReflect().IsValid() { - return interpreter.EmptyActivation(), nil - } - reg, err := types.NewRegistry(ctx) - if err != nil { - return nil, err - } - pbRef := ctx.ProtoReflect() - typeName := string(pbRef.Descriptor().FullName()) - fields := pbRef.Descriptor().Fields() - vars := make(map[string]any, fields.Len()) - for i := 0; i < fields.Len(); i++ { - field := fields.Get(i) - sft, found := reg.FindStructFieldType(typeName, field.TextName()) - if !found { - return nil, fmt.Errorf("no such field: %s", field.TextName()) - } - fieldVal, err := sft.GetFrom(ctx) + var err error + e, err = Declarations(decls...)(e) if err != nil { return nil, err } - vars[field.TextName()] = fieldVal + return Types(dynamicpb.NewMessage(descriptor))(e) } - return interpreter.NewActivation(vars) } // EnableMacroCallTracking ensures that call expressions which are replaced by macros @@ -621,6 +545,13 @@ func DefaultUTCTimeZone(enabled bool) EnvOption { return features(featureDefaultUTCTimeZone, enabled) } +// OptionalTypes enable support for optional syntax and types in CEL. The optional value type makes +// it possible to express whether variables have been provided, whether a result has been computed, +// and in the future whether an object field path, map key value, or list index has a value. +func OptionalTypes() EnvOption { + return Lib(optionalLibrary{}) +} + // features sets the given feature flags. See list of Feature constants above. func features(flag int, enabled bool) EnvOption { return func(e *Env) (*Env, error) { @@ -646,14 +577,3 @@ func ParserExpressionSizeLimit(limit int) EnvOption { return e, nil } } - -func maybeInteropProvider(provider any) (types.Provider, error) { - switch p := provider.(type) { - case types.Provider: - return p, nil - case ref.TypeProvider: - return &interopCELTypeProvider{TypeProvider: p}, nil - default: - return nil, fmt.Errorf("unsupported type provider: %T", provider) - } -} diff --git a/vendor/github.com/google/cel-go/cel/program.go b/vendor/github.com/google/cel-go/cel/program.go index 2dd72f75010..a630f5bfa1f 100644 --- a/vendor/github.com/google/cel-go/cel/program.go +++ b/vendor/github.com/google/cel-go/cel/program.go @@ -19,10 +19,11 @@ import ( "fmt" "sync" - celast "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/interpreter" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // Program is an evaluable view of an Ast. @@ -61,9 +62,6 @@ func NoVars() interpreter.Activation { // PartialVars returns a PartialActivation which contains variables and a set of AttributePattern // values that indicate variables or parts of variables whose value are not yet known. // -// This method relies on manually configured sets of missing attribute patterns. For a method which -// infers the missing variables from the input and the configured environment, use Env.PartialVars(). -// // The `vars` value may either be an interpreter.Activation or any valid input to the // interpreter.NewActivation call. func PartialVars(vars any, @@ -106,7 +104,7 @@ func (ed *EvalDetails) State() interpreter.EvalState { // ActualCost returns the tracked cost through the course of execution when `CostTracking` is enabled. // Otherwise, returns nil if the cost was not enabled. 
func (ed *EvalDetails) ActualCost() *uint64 { - if ed == nil || ed.costTracker == nil { + if ed.costTracker == nil { return nil } cost := ed.costTracker.ActualCost() @@ -130,14 +128,10 @@ type prog struct { // Interpretable configured from an Ast and aggregate decorator set based on program options. interpretable interpreter.Interpretable callCostEstimator interpreter.ActualCostEstimator - costOptions []interpreter.CostTrackerOption costLimit *uint64 } func (p *prog) clone() *prog { - costOptsCopy := make([]interpreter.CostTrackerOption, len(p.costOptions)) - copy(costOptsCopy, p.costOptions) - return &prog{ Env: p.Env, evalOpts: p.evalOpts, @@ -159,10 +153,9 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Ensure the default attribute factory is set after the adapter and provider are // configured. p := &prog{ - Env: e, - decorators: []interpreter.InterpretableDecorator{}, - dispatcher: disp, - costOptions: []interpreter.CostTrackerOption{}, + Env: e, + decorators: []interpreter.InterpretableDecorator{}, + dispatcher: disp, } // Configure the program via the ProgramOption values. @@ -176,7 +169,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { // Add the function bindings created via Function() options. for _, fn := range e.functions { - bindings, err := fn.Bindings() + bindings, err := fn.bindings() if err != nil { return nil, err } @@ -215,11 +208,14 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { } // Enable compile-time checking of syntax/cardinality for string.format calls. if p.evalOpts&OptCheckStringFormat == OptCheckStringFormat { - var isValidType func(id int64, validTypes ...ref.Type) (bool, error) + var isValidType func(id int64, validTypes ...*types.TypeValue) (bool, error) if ast.IsChecked() { - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { - t := ast.typeMap[id] - if t.Kind() == DynKind { + isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { + t, err := ExprTypeToType(ast.typeMap[id]) + if err != nil { + return false, err + } + if t.kind == DynKind { return true, nil } for _, vt := range validTypes { @@ -227,7 +223,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { if err != nil { return false, err } - if t.Kind() == k { + if k == t.kind { return true, nil } } @@ -235,7 +231,7 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { } } else { // if the AST isn't type-checked, short-circuit validation - isValidType = func(id int64, validTypes ...ref.Type) (bool, error) { + isValidType = func(id int64, validTypes ...*types.TypeValue) (bool, error) { return true, nil } } @@ -247,12 +243,6 @@ func newProgram(e *Env, ast *Ast, opts []ProgramOption) (Program, error) { factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) { costTracker.Estimator = p.callCostEstimator costTracker.Limit = p.costLimit - for _, costOpt := range p.costOptions { - err := costOpt(costTracker) - if err != nil { - return nil, err - } - } // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This // prevents the underlying memory from being shared between factory function calls causing // undesired mutations. @@ -294,11 +284,10 @@ func (p *prog) initInterpretable(ast *Ast, decs []interpreter.InterpretableDecor } // When the AST has been checked it contains metadata that can be used to speed up program execution. 
- checked := &celast.CheckedAST{ - Expr: ast.Expr(), - SourceInfo: ast.SourceInfo(), - TypeMap: ast.typeMap, - ReferenceMap: ast.refMap, + var checked *exprpb.CheckedExpr + checked, err := AstToCheckedExpr(ast) + if err != nil { + return nil, err } interpretable, err := p.interpreter.NewInterpretable(checked, decs...) if err != nil { @@ -382,11 +371,7 @@ type progGen struct { // the test is successful. func newProgGen(factory progFactory) (Program, error) { // Test the factory to make sure that configuration errors are spotted at config - tracker, err := interpreter.NewCostTracker(nil) - if err != nil { - return nil, err - } - _, err = factory(interpreter.NewEvalState(), tracker) + _, err := factory(interpreter.NewEvalState(), &interpreter.CostTracker{}) if err != nil { return nil, err } @@ -399,10 +384,7 @@ func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) { // new EvalState instance for each call to ensure that unique evaluations yield unique stateful // results. state := interpreter.NewEvalState() - costTracker, err := interpreter.NewCostTracker(nil) - if err != nil { - return nil, nil, err - } + costTracker := &interpreter.CostTracker{} det := &EvalDetails{state: state, costTracker: costTracker} // Generate a new instance of the interpretable using the factory configured during the call to @@ -430,10 +412,7 @@ func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalD // new EvalState instance for each call to ensure that unique evaluations yield unique stateful // results. state := interpreter.NewEvalState() - costTracker, err := interpreter.NewCostTracker(nil) - if err != nil { - return nil, nil, err - } + costTracker := &interpreter.CostTracker{} det := &EvalDetails{state: state, costTracker: costTracker} // Generate a new instance of the interpretable using the factory configured during the call to @@ -519,7 +498,7 @@ type evalActivation struct { // The lazy binding will only be invoked once per evaluation. // // Values which are not represented as ref.Val types on input may be adapted to a ref.Val using -// the types.Adapter configured in the environment. +// the ref.TypeAdapter configured in the environment. func (a *evalActivation) ResolveName(name string) (any, bool) { v, found := a.vars[name] if !found { diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go deleted file mode 100644 index 78b31138186..00000000000 --- a/vendor/github.com/google/cel-go/cel/validator.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cel - -import ( - "fmt" - "reflect" - "regexp" - - "github.com/google/cel-go/common/ast" - "github.com/google/cel-go/common/overloads" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -const ( - homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous" - - // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure - // the set of function names which are exempt from homogeneous type checks. The expected type - // is a string list of function names. - // - // As an example, the `.format([args])` call expects the input arguments list to be - // comprised of a variety of types which correspond to the types expected by the format control - // clauses; however, all other uses of a mixed element type list, would be unexpected. - HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt" -) - -// ASTValidators configures a set of ASTValidator instances into the target environment. -// -// Validators are applied in the order in which the are specified and are treated as singletons. -// The same ASTValidator with a given name will not be applied more than once. -func ASTValidators(validators ...ASTValidator) EnvOption { - return func(e *Env) (*Env, error) { - for _, v := range validators { - if !e.HasValidator(v.Name()) { - e.validators = append(e.validators, v) - } - } - return e, nil - } -} - -// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment. -// -// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be -// reported to the caller. -type ASTValidator interface { - // Name returns the name of the validator. Names must be unique. - Name() string - - // Validate validates a given Ast within an Environment and collects a set of potential issues. - // - // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to - // the invocation of the Validate call. The expectation is that the validator configuration - // is created in sequence and immutable once provided to the Validate call. - // - // See individual validators for more information on their configuration keys and configuration - // properties. - Validate(*Env, ValidatorConfig, *ast.CheckedAST, *Issues) -} - -// ValidatorConfig provides an accessor method for querying validator configuration state. -type ValidatorConfig interface { - GetOrDefault(name string, value any) any -} - -// MutableValidatorConfig provides mutation methods for querying and updating validator configuration -// settings. -type MutableValidatorConfig interface { - ValidatorConfig - Set(name string, value any) error -} - -// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator, -// participates in validator configuration settings. -// -// This interface may be split from the expectation of being an ASTValidator instance in the future. -type ASTValidatorConfigurer interface { - Configure(MutableValidatorConfig) error -} - -// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces. -type validatorConfig struct { - data map[string]any -} - -// newValidatorConfig initializes the validator config with default values for core CEL validators. 
-func newValidatorConfig() *validatorConfig { - return &validatorConfig{ - data: map[string]any{ - HomogeneousAggregateLiteralExemptFunctions: []string{}, - }, - } -} - -// GetOrDefault returns the configured value for the name, if present, else the input default value. -// -// Note, the type-agreement between the input default and configured value is not checked on read. -func (config *validatorConfig) GetOrDefault(name string, value any) any { - v, found := config.data[name] - if !found { - return value - } - return v -} - -// Set configures a validator option with the given name and value. -// -// If the value had previously been set, the new value must have the same reflection type as the old one, -// or the call will error. -func (config *validatorConfig) Set(name string, value any) error { - v, found := config.data[name] - if found && reflect.TypeOf(v) != reflect.TypeOf(value) { - return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v) - } - config.data[name] = value - return nil -} - -// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors. -// -// - Validate duration and timestamp literals -// - Ensure regex strings are valid -// - Disable mixed type list and map literals -func ExtendedValidations() EnvOption { - return ASTValidators( - ValidateDurationLiterals(), - ValidateTimestampLiterals(), - ValidateRegexLiterals(), - ValidateHomogeneousAggregateLiterals(), - ) -} - -// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check. -func ValidateDurationLiterals() ASTValidator { - return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall) -} - -// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check. -func ValidateTimestampLiterals() ASTValidator { - return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall) -} - -// ValidateRegexLiterals ensures that regex patterns are validated after type-check. -func ValidateRegexLiterals() ASTValidator { - return newFormatValidator(overloads.Matches, 0, compileRegex) -} - -// ValidateHomogeneousAggregateLiterals checks that all list and map literals entries have the same types, i.e. -// no mixed list element types or mixed map key or map value types. -// -// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all -// literals which occur within string format calls. -func ValidateHomogeneousAggregateLiterals() ASTValidator { - return homogeneousAggregateLiteralValidator{} -} - -// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit. -// -// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial -// time to complete. -// -// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have -// no actual looping cost. The cel.bind() utilizes the comprehension structure to perform local variable -// assignments and supplies an empty iteration range, so they won't count against the nesting limit either. 
-func ValidateComprehensionNestingLimit(limit int) ASTValidator { - return nestingLimitValidator{limit: limit} -} - -type argChecker func(env *Env, call, arg ast.NavigableExpr) error - -func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator { - return formatValidator{ - funcName: funcName, - check: check, - argNum: argNum, - } -} - -type formatValidator struct { - funcName string - argNum int - check argChecker -} - -// Name returns the unique name of this function format validator. -func (v formatValidator) Name() string { - return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName) -} - -// Validate searches the AST for uses of a given function name with a constant argument and performs a check -// on whether the argument is a valid literal value. -func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) - funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) - for _, call := range funcCalls { - callArgs := call.AsCall().Args() - if len(callArgs) <= v.argNum { - continue - } - litArg := callArgs[v.argNum] - if litArg.Kind() != ast.LiteralKind { - continue - } - if err := v.check(e, call, litArg); err != nil { - iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName) - } - } -} - -func evalCall(env *Env, call, arg ast.NavigableExpr) error { - ast := ParsedExprToAst(&exprpb.ParsedExpr{Expr: call.ToExpr()}) - prg, err := env.Program(ast) - if err != nil { - return err - } - _, _, err = prg.Eval(NoVars()) - return err -} - -func compileRegex(_ *Env, _, arg ast.NavigableExpr) error { - pattern := arg.AsLiteral().Value().(string) - _, err := regexp.Compile(pattern) - return err -} - -type homogeneousAggregateLiteralValidator struct{} - -// Name returns the unique name of the homogeneous type validator. -func (homogeneousAggregateLiteralValidator) Name() string { - return homogeneousValidatorName -} - -// Configure implements the ASTValidatorConfigurer interface and currently sets the list of standard -// and exempt functions from homogeneous aggregate literal checks. -// -// TODO: Move this call into the string.format() ASTValidator once ported. -func (homogeneousAggregateLiteralValidator) Configure(c MutableValidatorConfig) error { - emptyList := []string{} - exemptFunctions := c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, emptyList).([]string) - exemptFunctions = append(exemptFunctions, "format") - return c.Set(HomogeneousAggregateLiteralExemptFunctions, exemptFunctions) -} - -// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. -// -// This validator makes an exception for list and map literals which occur at any level of nesting within -// string format calls. 
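Editor's note, not part of the patch: cel/validator.go is deleted above because the pinned cel-go release predates AST validators. For context only, this is roughly how the removed API is used in newer cel-go versions that still ship it; it will not compile against the vendored code in this PR:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		// Duration/timestamp/regex literal checks plus homogeneous aggregate literals.
		cel.ExtendedValidations(),
		// Reject comprehensions nested more than two levels deep at compile time.
		cel.ASTValidators(cel.ValidateComprehensionNestingLimit(2)),
	)
	if err != nil {
		panic(err)
	}
	// Three nested macros exceed the configured limit, so Compile reports an issue
	// instead of allowing a potentially expensive evaluation later.
	_, iss := env.Compile(`[[1]].all(x, x.all(y, [y].all(z, z > 0)))`)
	fmt.Println(iss.Err())
}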
-func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - var exemptedFunctions []string - exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) - root := ast.NavigateCheckedAST(a) - listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) - for _, listExpr := range listExprs { - if inExemptFunction(listExpr, exemptedFunctions) { - continue - } - l := listExpr.AsList() - elements := l.Elements() - optIndices := l.OptionalIndices() - var elemType *Type - for i, e := range elements { - et := e.Type() - if isOptionalIndex(i, optIndices) { - et = et.Parameters()[0] - } - if elemType == nil { - elemType = et - continue - } - if !elemType.IsEquivalentType(et) { - v.typeMismatch(iss, e.ID(), elemType, et) - break - } - } - } - mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind)) - for _, mapExpr := range mapExprs { - if inExemptFunction(mapExpr, exemptedFunctions) { - continue - } - m := mapExpr.AsMap() - entries := m.Entries() - var keyType, valType *Type - for _, e := range entries { - key, val := e.Key(), e.Value() - kt, vt := key.Type(), val.Type() - if e.IsOptional() { - vt = vt.Parameters()[0] - } - if keyType == nil && valType == nil { - keyType, valType = kt, vt - continue - } - if !keyType.IsEquivalentType(kt) { - v.typeMismatch(iss, key.ID(), keyType, kt) - } - if !valType.IsEquivalentType(vt) { - v.typeMismatch(iss, val.ID(), valType, vt) - } - } - } -} - -func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { - if parent, found := e.Parent(); found { - if parent.Kind() == ast.CallKind { - fnName := parent.AsCall().FunctionName() - for _, exempt := range exemptFunctions { - if exempt == fnName { - return true - } - } - } - if parent.Kind() == ast.ListKind || parent.Kind() == ast.MapKind { - return inExemptFunction(parent, exemptFunctions) - } - } - return false -} - -func isOptionalIndex(i int, optIndices []int32) bool { - for _, optInd := range optIndices { - if i == int(optInd) { - return true - } - } - return false -} - -func (homogeneousAggregateLiteralValidator) typeMismatch(iss *Issues, id int64, expected, actual *Type) { - iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual)) -} - -type nestingLimitValidator struct { - limit int -} - -func (v nestingLimitValidator) Name() string { - return "cel.lib.std.validate.comprehension_nesting_limit" -} - -func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.CheckedAST, iss *Issues) { - root := ast.NavigateCheckedAST(a) - comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) - if len(comprehensions) <= v.limit { - return - } - for _, comp := range comprehensions { - count := 0 - e := comp - hasParent := true - for hasParent { - // When the expression is not a comprehension, continue to the next ancestor. - if e.Kind() != ast.ComprehensionKind { - e, hasParent = e.Parent() - continue - } - // When the comprehension has an empty range, continue to the next ancestor - // as this comprehension does not have any associated cost. - iterRange := e.AsComprehension().IterRange() - if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 { - e, hasParent = e.Parent() - continue - } - // Otherwise check the nesting limit. 
- count++ - if count > v.limit { - iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit") - break - } - e, hasParent = e.Parent() - } - } -} diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel index 0459d35239f..1c6ddb7f7da 100644 --- a/vendor/github.com/google/cel-go/checker/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -11,11 +11,9 @@ go_library( "cost.go", "env.go", "errors.go", - "format.go", "mapping.go", "options.go", "printer.go", - "scopes.go", "standard.go", "types.go", ], @@ -24,13 +22,10 @@ go_library( deps = [ "//checker/decls:go_default_library", "//common:go_default_library", - "//common/ast:go_default_library", "//common/containers:go_default_library", "//common/debug:go_default_library", - "//common/decls:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", - "//common/stdlib:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", @@ -49,7 +44,6 @@ go_test( "checker_test.go", "cost_test.go", "env_test.go", - "format_test.go", ], embed = [ ":go_default_library", diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go index 720e4fa968f..257cffecf66 100644 --- a/vendor/github.com/google/cel-go/checker/checker.go +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -18,13 +18,15 @@ package checker import ( "fmt" + "reflect" + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common" - "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + "google.golang.org/protobuf/proto" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -35,8 +37,8 @@ type checker struct { mappings *mapping freeTypeVarCounter int sourceInfo *exprpb.SourceInfo - types map[int64]*types.Type - references map[int64]*ast.ReferenceInfo + types map[int64]*exprpb.Type + references map[int64]*exprpb.Reference } // Check performs type checking, giving a typed AST. @@ -45,38 +47,40 @@ type checker struct { // descriptions of protocol buffers, and a registry for errors. // Returns a CheckedExpr proto, which might not be usable if // there are errors in the error registry. -func Check(parsedExpr *exprpb.ParsedExpr, source common.Source, env *Env) (*ast.CheckedAST, *common.Errors) { - errs := common.NewErrors(source) +func Check(parsedExpr *exprpb.ParsedExpr, + source common.Source, + env *Env) (*exprpb.CheckedExpr, *common.Errors) { c := checker{ env: env, - errors: &typeErrors{errs: errs}, + errors: &typeErrors{common.NewErrors(source)}, mappings: newMapping(), freeTypeVarCounter: 0, sourceInfo: parsedExpr.GetSourceInfo(), - types: make(map[int64]*types.Type), - references: make(map[int64]*ast.ReferenceInfo), + types: make(map[int64]*exprpb.Type), + references: make(map[int64]*exprpb.Reference), } c.check(parsedExpr.GetExpr()) // Walk over the final type map substituting any type parameters either by their bound value or // by DYN. 
- m := make(map[int64]*types.Type) - for id, t := range c.types { - m[id] = substitute(c.mappings, t, true) + m := make(map[int64]*exprpb.Type) + for k, v := range c.types { + m[k] = substitute(c.mappings, v, true) } - return &ast.CheckedAST{ + return &exprpb.CheckedExpr{ Expr: parsedExpr.GetExpr(), SourceInfo: parsedExpr.GetSourceInfo(), TypeMap: m, ReferenceMap: c.references, - }, errs + }, c.errors.Errors } func (c *checker) check(e *exprpb.Expr) { if e == nil { return } + switch e.GetExprKind().(type) { case *exprpb.Expr_ConstExpr: literal := e.GetConstExpr() @@ -109,51 +113,53 @@ func (c *checker) check(e *exprpb.Expr) { case *exprpb.Expr_ComprehensionExpr: c.checkComprehension(e) default: - c.errors.unexpectedASTType(e.GetId(), c.location(e), e) + c.errors.ReportError( + c.location(e), "Unrecognized ast type: %v", reflect.TypeOf(e)) } } func (c *checker) checkInt64Literal(e *exprpb.Expr) { - c.setType(e, types.IntType) + c.setType(e, decls.Int) } func (c *checker) checkUint64Literal(e *exprpb.Expr) { - c.setType(e, types.UintType) + c.setType(e, decls.Uint) } func (c *checker) checkStringLiteral(e *exprpb.Expr) { - c.setType(e, types.StringType) + c.setType(e, decls.String) } func (c *checker) checkBytesLiteral(e *exprpb.Expr) { - c.setType(e, types.BytesType) + c.setType(e, decls.Bytes) } func (c *checker) checkDoubleLiteral(e *exprpb.Expr) { - c.setType(e, types.DoubleType) + c.setType(e, decls.Double) } func (c *checker) checkBoolLiteral(e *exprpb.Expr) { - c.setType(e, types.BoolType) + c.setType(e, decls.Bool) } func (c *checker) checkNullLiteral(e *exprpb.Expr) { - c.setType(e, types.NullType) + c.setType(e, decls.Null) } func (c *checker) checkIdent(e *exprpb.Expr) { identExpr := e.GetIdentExpr() // Check to see if the identifier is declared. if ident := c.env.LookupIdent(identExpr.GetName()); ident != nil { - c.setType(e, ident.Type()) - c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) + c.setType(e, ident.GetIdent().GetType()) + c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) // Overwrite the identifier with its fully qualified name. - identExpr.Name = ident.Name() + identExpr.Name = ident.GetName() return } - c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), identExpr.GetName()) + c.setType(e, decls.Error) + c.errors.undeclaredReference( + c.location(e), c.env.container.Name(), identExpr.GetName()) } func (c *checker) checkSelect(e *exprpb.Expr) { @@ -168,9 +174,9 @@ func (c *checker) checkSelect(e *exprpb.Expr) { // Rewrite the node to be a variable reference to the resolved fully-qualified // variable name. 
- c.setType(e, ident.Type()) - c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) - identName := ident.Name() + c.setType(e, ident.GetIdent().GetType()) + c.setReference(e, newIdentReference(ident.GetName(), ident.GetIdent().GetValue())) + identName := ident.GetName() e.ExprKind = &exprpb.Expr_IdentExpr{ IdentExpr: &exprpb.Expr_Ident{ Name: identName, @@ -182,7 +188,7 @@ func (c *checker) checkSelect(e *exprpb.Expr) { resultType := c.checkSelectField(e, sel.GetOperand(), sel.GetField(), false) if sel.TestOnly { - resultType = types.BoolType + resultType = decls.Bool } c.setType(e, substitute(c.mappings, resultType, false)) } @@ -194,17 +200,16 @@ func (c *checker) checkOptSelect(e *exprpb.Expr) { field := call.GetArgs()[1] fieldName, isString := maybeUnwrapString(field) if !isString { - c.errors.notAnOptionalFieldSelection(field.GetId(), c.location(field), field) + c.errors.ReportError(c.location(field), "unsupported optional field selection: %v", field) return } // Perform type-checking using the field selection logic. resultType := c.checkSelectField(e, operand, fieldName, true) c.setType(e, substitute(c.mappings, resultType, false)) - c.setReference(e, ast.NewFunctionReference("select_optional_field")) } -func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *types.Type { +func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, optional bool) *exprpb.Type { // Interpret as field selection, first traversing down the operand. c.check(operand) operandType := substitute(c.mappings, c.getType(operand), false) @@ -213,37 +218,38 @@ func (c *checker) checkSelectField(e, operand *exprpb.Expr, field string, option targetType, isOpt := maybeUnwrapOptional(operandType) // Assume error type by default as most types do not support field selection. - resultType := types.ErrorType - switch targetType.Kind() { - case types.MapKind: + resultType := decls.Error + switch kindOf(targetType) { + case kindMap: // Maps yield their value type as the selection result type. - resultType = targetType.Parameters()[1] - case types.StructKind: + mapType := targetType.GetMapType() + resultType = mapType.GetValueType() + case kindObject: // Objects yield their field type declaration as the selection result type, but only if // the field is defined. messageType := targetType - if fieldType, found := c.lookupFieldType(e.GetId(), messageType.TypeName(), field); found { - resultType = fieldType + if fieldType, found := c.lookupFieldType(c.location(e), messageType.GetMessageType(), field); found { + resultType = fieldType.Type } - case types.TypeParamKind: + case kindTypeParam: // Set the operand type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type // substitutions for the type param under the covers. - c.isAssignable(types.DynType, targetType) + c.isAssignable(decls.Dyn, targetType) // Also, set the result type to DYN. - resultType = types.DynType + resultType = decls.Dyn default: // Dynamic / error values are treated as DYN type. Errors are handled this way as well // in order to allow forward progress on the check. if !isDynOrError(targetType) { - c.errors.typeDoesNotSupportFieldSelection(e.GetId(), c.location(e), targetType) + c.errors.typeDoesNotSupportFieldSelection(c.location(e), targetType) } - resultType = types.DynType + resultType = decls.Dyn } // If the target type was optional coming in, then the result must be optional going out. 
if isOpt || optional { - return types.NewOptionalType(resultType) + return decls.NewOptionalType(resultType) } return resultType } @@ -271,14 +277,15 @@ func (c *checker) checkCall(e *exprpb.Expr) { // Check for the existence of the function. fn := c.env.LookupFunction(fnName) if fn == nil { - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) - c.setType(e, types.ErrorType) + c.errors.undeclaredReference( + c.location(e), c.env.container.Name(), fnName) + c.setType(e, decls.Error) return } // Overwrite the function name with its fully qualified resolved name. - call.Function = fn.Name() + call.Function = fn.GetName() // Check to see whether the overload resolves. - c.resolveOverloadOrError(e, fn, nil, args) + c.resolveOverloadOrError(c.location(e), e, fn, nil, args) return } @@ -296,8 +303,8 @@ func (c *checker) checkCall(e *exprpb.Expr) { // be an inaccurate representation of the desired evaluation behavior. // Overwrite with fully-qualified resolved function name sans receiver target. call.Target = nil - call.Function = fn.Name() - c.resolveOverloadOrError(e, fn, nil, args) + call.Function = fn.GetName() + c.resolveOverloadOrError(c.location(e), e, fn, nil, args) return } } @@ -307,21 +314,22 @@ func (c *checker) checkCall(e *exprpb.Expr) { fn := c.env.LookupFunction(fnName) // Function found, attempt overload resolution. if fn != nil { - c.resolveOverloadOrError(e, fn, target, args) + c.resolveOverloadOrError(c.location(e), e, fn, target, args) return } // Function name not declared, record error. - c.setType(e, types.ErrorType) - c.errors.undeclaredReference(e.GetId(), c.location(e), c.env.container.Name(), fnName) + c.errors.undeclaredReference(c.location(e), c.env.container.Name(), fnName) } func (c *checker) resolveOverloadOrError( - e *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) { + loc common.Location, + e *exprpb.Expr, + fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) { // Attempt to resolve the overload. - resolution := c.resolveOverload(e, fn, target, args) + resolution := c.resolveOverload(loc, fn, target, args) // No such overload, error noted in the resolveOverload call, type recorded here. if resolution == nil { - c.setType(e, types.ErrorType) + c.setType(e, decls.Error) return } // Overload found. @@ -330,9 +338,10 @@ func (c *checker) resolveOverloadOrError( } func (c *checker) resolveOverload( - call *exprpb.Expr, fn *decls.FunctionDecl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { + loc common.Location, + fn *exprpb.Decl, target *exprpb.Expr, args []*exprpb.Expr) *overloadResolution { - var argTypes []*types.Type + var argTypes []*exprpb.Type if target != nil { argTypes = append(argTypes, c.getType(target)) } @@ -340,75 +349,55 @@ func (c *checker) resolveOverload( argTypes = append(argTypes, c.getType(arg)) } - var resultType *types.Type - var checkedRef *ast.ReferenceInfo - for _, overload := range fn.OverloadDecls() { + var resultType *exprpb.Type + var checkedRef *exprpb.Reference + for _, overload := range fn.GetFunction().GetOverloads() { // Determine whether the overload is currently considered. - if c.env.isOverloadDisabled(overload.ID()) { + if c.env.isOverloadDisabled(overload.GetOverloadId()) { continue } // Ensure the call style for the overload matches. 
- if (target == nil && overload.IsMemberFunction()) || - (target != nil && !overload.IsMemberFunction()) { + if (target == nil && overload.GetIsInstanceFunction()) || + (target != nil && !overload.GetIsInstanceFunction()) { // not a compatible call style. continue } - // Alternative type-checking behavior when the logical operators are compacted into - // variadic AST representations. - if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr { - checkedRef = ast.NewFunctionReference(overload.ID()) - for i, argType := range argTypes { - if !c.isAssignable(argType, types.BoolType) { - c.errors.typeMismatch( - args[i].GetId(), - c.locationByID(args[i].GetId()), - types.BoolType, - argType) - resultType = types.ErrorType - } - } - if isError(resultType) { - return nil - } - return newResolution(checkedRef, types.BoolType) - } - - overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...) - typeParams := overload.TypeParams() - if len(typeParams) != 0 { + overloadType := decls.NewFunctionType(overload.ResultType, overload.Params...) + if len(overload.GetTypeParams()) > 0 { // Instantiate overload's type with fresh type variables. substitutions := newMapping() - for _, typePar := range typeParams { - substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar()) + for _, typePar := range overload.GetTypeParams() { + substitutions.add(decls.NewTypeParamType(typePar), c.newTypeVar()) } overloadType = substitute(substitutions, overloadType, false) } - candidateArgTypes := overloadType.Parameters()[1:] + candidateArgTypes := overloadType.GetFunction().GetArgTypes() if c.isAssignableList(argTypes, candidateArgTypes) { if checkedRef == nil { - checkedRef = ast.NewFunctionReference(overload.ID()) + checkedRef = newFunctionReference(overload.GetOverloadId()) } else { - checkedRef.AddOverload(overload.ID()) + checkedRef.OverloadId = append(checkedRef.GetOverloadId(), overload.GetOverloadId()) } // First matching overload, determines result type. 
- fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false) + fnResultType := substitute(c.mappings, overloadType.GetFunction().GetResultType(), false) if resultType == nil { resultType = fnResultType - } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) { - resultType = types.DynType + } else if !isDyn(resultType) && !proto.Equal(fnResultType, resultType) { + resultType = decls.Dyn } } } if resultType == nil { - for i, argType := range argTypes { - argTypes[i] = substitute(c.mappings, argType, true) + for i, arg := range argTypes { + argTypes[i] = substitute(c.mappings, arg, true) } - c.errors.noMatchingOverload(call.GetId(), c.location(call), fn.Name(), argTypes, target != nil) + c.errors.noMatchingOverload(loc, fn.GetName(), argTypes, target != nil) + resultType = decls.Error return nil } @@ -417,7 +406,7 @@ func (c *checker) resolveOverload( func (c *checker) checkCreateList(e *exprpb.Expr) { create := e.GetListExpr() - var elemsType *types.Type + var elemsType *exprpb.Type optionalIndices := create.GetOptionalIndices() optionals := make(map[int32]bool, len(optionalIndices)) for _, optInd := range optionalIndices { @@ -430,16 +419,16 @@ func (c *checker) checkCreateList(e *exprpb.Expr) { var isOptional bool elemType, isOptional = maybeUnwrapOptional(elemType) if !isOptional && !isDyn(elemType) { - c.errors.typeMismatch(e.GetId(), c.location(e), types.NewOptionalType(elemType), elemType) + c.errors.typeMismatch(c.location(e), decls.NewOptionalType(elemType), elemType) } } - elemsType = c.joinTypes(e, elemsType, elemType) + elemsType = c.joinTypes(c.location(e), elemsType, elemType) } if elemsType == nil { // If the list is empty, assign free type var to elem type. elemsType = c.newTypeVar() } - c.setType(e, types.NewListType(elemsType)) + c.setType(e, decls.NewListType(elemsType)) } func (c *checker) checkCreateStruct(e *exprpb.Expr) { @@ -453,12 +442,12 @@ func (c *checker) checkCreateStruct(e *exprpb.Expr) { func (c *checker) checkCreateMap(e *exprpb.Expr) { mapVal := e.GetStructExpr() - var mapKeyType *types.Type - var mapValueType *types.Type + var mapKeyType *exprpb.Type + var mapValueType *exprpb.Type for _, ent := range mapVal.GetEntries() { key := ent.GetMapKey() c.check(key) - mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key)) + mapKeyType = c.joinTypes(c.location(key), mapKeyType, c.getType(key)) val := ent.GetValue() c.check(val) @@ -467,54 +456,50 @@ func (c *checker) checkCreateMap(e *exprpb.Expr) { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(val.GetId(), c.location(val), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(c.location(val), decls.NewOptionalType(valType), valType) } } - mapValueType = c.joinTypes(val, mapValueType, valType) + mapValueType = c.joinTypes(c.location(val), mapValueType, valType) } if mapKeyType == nil { // If the map is empty, assign free type variables to typeKey and value type. mapKeyType = c.newTypeVar() mapValueType = c.newTypeVar() } - c.setType(e, types.NewMapType(mapKeyType, mapValueType)) + c.setType(e, decls.NewMapType(mapKeyType, mapValueType)) } func (c *checker) checkCreateMessage(e *exprpb.Expr) { msgVal := e.GetStructExpr() // Determine the type of the message. 
- resultType := types.ErrorType - ident := c.env.LookupIdent(msgVal.GetMessageName()) - if ident == nil { + messageType := decls.Error + decl := c.env.LookupIdent(msgVal.GetMessageName()) + if decl == nil { c.errors.undeclaredReference( - e.GetId(), c.location(e), c.env.container.Name(), msgVal.GetMessageName()) - c.setType(e, types.ErrorType) + c.location(e), c.env.container.Name(), msgVal.GetMessageName()) return } // Ensure the type name is fully qualified in the AST. - typeName := ident.Name() - msgVal.MessageName = typeName - c.setReference(e, ast.NewIdentReference(ident.Name(), nil)) - identKind := ident.Type().Kind() - if identKind != types.ErrorKind { - if identKind != types.TypeKind { - c.errors.notAType(e.GetId(), c.location(e), ident.Type().DeclaredTypeName()) + msgVal.MessageName = decl.GetName() + c.setReference(e, newIdentReference(decl.GetName(), nil)) + ident := decl.GetIdent() + identKind := kindOf(ident.GetType()) + if identKind != kindError { + if identKind != kindType { + c.errors.notAType(c.location(e), ident.GetType()) } else { - resultType = ident.Type().Parameters()[0] - // Backwards compatibility test between well-known types and message types - // In this context, the type is being instantiated by its protobuf name which - // is not ideal or recommended, but some users expect this to work. - if isWellKnownType(resultType) { - typeName = getWellKnownTypeName(resultType) - } else if resultType.Kind() == types.StructKind { - typeName = resultType.DeclaredTypeName() - } else { - c.errors.notAMessageType(e.GetId(), c.location(e), resultType.DeclaredTypeName()) - resultType = types.ErrorType + messageType = ident.GetType().GetType() + if kindOf(messageType) != kindObject { + c.errors.notAMessageType(c.location(e), messageType) + messageType = decls.Error } } } - c.setType(e, resultType) + if isObjectWellKnownType(messageType) { + c.setType(e, getObjectWellKnownType(messageType)) + } else { + c.setType(e, messageType) + } // Check the field initializers. 
for _, ent := range msgVal.GetEntries() { @@ -522,10 +507,10 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { value := ent.GetValue() c.check(value) - fieldType := types.ErrorType - ft, found := c.lookupFieldType(ent.GetId(), typeName, field) + fieldType := decls.Error + ft, found := c.lookupFieldType(c.locationByID(ent.GetId()), messageType.GetMessageType(), field) if found { - fieldType = ft + fieldType = ft.Type } valType := c.getType(value) @@ -533,11 +518,11 @@ func (c *checker) checkCreateMessage(e *exprpb.Expr) { var isOptional bool valType, isOptional = maybeUnwrapOptional(valType) if !isOptional && !isDyn(valType) { - c.errors.typeMismatch(value.GetId(), c.location(value), types.NewOptionalType(valType), valType) + c.errors.typeMismatch(c.location(value), decls.NewOptionalType(valType), valType) } } if !c.isAssignable(fieldType, valType) { - c.errors.fieldTypeMismatch(ent.GetId(), c.locationByID(ent.GetId()), field, fieldType, valType) + c.errors.fieldTypeMismatch(c.locationByID(ent.Id), field, fieldType, valType) } } } @@ -548,36 +533,36 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { c.check(comp.GetAccuInit()) accuType := c.getType(comp.GetAccuInit()) rangeType := substitute(c.mappings, c.getType(comp.GetIterRange()), false) - var varType *types.Type + var varType *exprpb.Type - switch rangeType.Kind() { - case types.ListKind: - varType = rangeType.Parameters()[0] - case types.MapKind: + switch kindOf(rangeType) { + case kindList: + varType = rangeType.GetListType().GetElemType() + case kindMap: // Ranges over the keys. - varType = rangeType.Parameters()[0] - case types.DynKind, types.ErrorKind, types.TypeParamKind: + varType = rangeType.GetMapType().GetKeyType() + case kindDyn, kindError, kindTypeParam: // Set the range type to DYN to prevent assignment to a potentially incorrect type // at a later point in type-checking. The isAssignable call will update the type // substitutions for the type param under the covers. - c.isAssignable(types.DynType, rangeType) + c.isAssignable(decls.Dyn, rangeType) // Set the range iteration variable to type DYN as well. - varType = types.DynType + varType = decls.Dyn default: - c.errors.notAComprehensionRange(comp.GetIterRange().GetId(), c.location(comp.GetIterRange()), rangeType) - varType = types.ErrorType + c.errors.notAComprehensionRange(c.location(comp.GetIterRange()), rangeType) + varType = decls.Error } // Create a scope for the comprehension since it has a local accumulation variable. // This scope will contain the accumulation variable used to compute the result. c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetAccuVar(), accuType)) + c.env.Add(decls.NewVar(comp.GetAccuVar(), accuType)) // Create a block scope for the loop. c.env = c.env.enterScope() - c.env.AddIdents(decls.NewVariable(comp.GetIterVar(), varType)) + c.env.Add(decls.NewVar(comp.GetIterVar(), varType)) // Check the variable references in the condition and step. c.check(comp.GetLoopCondition()) - c.assertType(comp.GetLoopCondition(), types.BoolType) + c.assertType(comp.GetLoopCondition(), decls.Bool) c.check(comp.GetLoopStep()) c.assertType(comp.GetLoopStep(), accuType) // Exit the loop's block scope before checking the result. @@ -589,7 +574,9 @@ func (c *checker) checkComprehension(e *exprpb.Expr) { } // Checks compatibility of joined types, and returns the most general common type. 
-func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *types.Type { +func (c *checker) joinTypes(loc common.Location, + previous *exprpb.Type, + current *exprpb.Type) *exprpb.Type { if previous == nil { return current } @@ -597,23 +584,23 @@ func (c *checker) joinTypes(e *exprpb.Expr, previous, current *types.Type) *type return mostGeneral(previous, current) } if c.dynAggregateLiteralElementTypesEnabled() { - return types.DynType + return decls.Dyn } - c.errors.typeMismatch(e.GetId(), c.location(e), previous, current) - return types.ErrorType + c.errors.typeMismatch(loc, previous, current) + return decls.Error } func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { return c.env.aggLitElemType == dynElementType } -func (c *checker) newTypeVar() *types.Type { +func (c *checker) newTypeVar() *exprpb.Type { id := c.freeTypeVarCounter c.freeTypeVarCounter++ - return types.NewTypeParamType(fmt.Sprintf("_var%d", id)) + return decls.NewTypeParamType(fmt.Sprintf("_var%d", id)) } -func (c *checker) isAssignable(t1, t2 *types.Type) bool { +func (c *checker) isAssignable(t1 *exprpb.Type, t2 *exprpb.Type) bool { subs := isAssignable(c.mappings, t1, t2) if subs != nil { c.mappings = subs @@ -623,7 +610,7 @@ func (c *checker) isAssignable(t1, t2 *types.Type) bool { return false } -func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { +func (c *checker) isAssignableList(l1 []*exprpb.Type, l2 []*exprpb.Type) bool { subs := isAssignableList(c.mappings, l1, l2) if subs != nil { c.mappings = subs @@ -633,52 +620,57 @@ func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { return false } -func maybeUnwrapString(e *exprpb.Expr) (string, bool) { - switch e.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - literal := e.GetConstExpr() - switch literal.GetConstantKind().(type) { - case *exprpb.Constant_StringValue: - return literal.GetStringValue(), true - } +func (c *checker) lookupFieldType(l common.Location, messageType string, fieldName string) (*ref.FieldType, bool) { + if _, found := c.env.provider.FindType(messageType); !found { + // This should not happen, anyway, report an error. 
+ c.errors.unexpectedFailedResolution(l, messageType) + return nil, false + } + + if ft, found := c.env.provider.FindFieldType(messageType, fieldName); found { + return ft, found } - return "", false + + c.errors.undefinedField(l, fieldName) + return nil, false } -func (c *checker) setType(e *exprpb.Expr, t *types.Type) { - if old, found := c.types[e.GetId()]; found && !old.IsExactType(t) { - c.errors.incompatibleType(e.GetId(), c.location(e), e, old, t) +func (c *checker) setType(e *exprpb.Expr, t *exprpb.Type) { + if old, found := c.types[e.GetId()]; found && !proto.Equal(old, t) { + c.errors.ReportError(c.location(e), + "(Incompatible) Type already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, t) return } c.types[e.GetId()] = t } -func (c *checker) getType(e *exprpb.Expr) *types.Type { +func (c *checker) getType(e *exprpb.Expr) *exprpb.Type { return c.types[e.GetId()] } -func (c *checker) setReference(e *exprpb.Expr, r *ast.ReferenceInfo) { - if old, found := c.references[e.GetId()]; found && !old.Equals(r) { - c.errors.referenceRedefinition(e.GetId(), c.location(e), e, old, r) +func (c *checker) setReference(e *exprpb.Expr, r *exprpb.Reference) { + if old, found := c.references[e.GetId()]; found && !proto.Equal(old, r) { + c.errors.ReportError(c.location(e), + "Reference already exists for expression: %v(%d) old:%v, new:%v", e, e.GetId(), old, r) return } c.references[e.GetId()] = r } -func (c *checker) assertType(e *exprpb.Expr, t *types.Type) { +func (c *checker) assertType(e *exprpb.Expr, t *exprpb.Type) { if !c.isAssignable(t, c.getType(e)) { - c.errors.typeMismatch(e.GetId(), c.location(e), t, c.getType(e)) + c.errors.typeMismatch(c.location(e), t, c.getType(e)) } } type overloadResolution struct { - Type *types.Type - Reference *ast.ReferenceInfo + Reference *exprpb.Reference + Type *exprpb.Type } -func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { +func newResolution(checkedRef *exprpb.Reference, t *exprpb.Type) *overloadResolution { return &overloadResolution{ - Reference: r, + Reference: checkedRef, Type: t, } } @@ -705,56 +697,10 @@ func (c *checker) locationByID(id int64) common.Location { return common.NoLocation } -func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { - if _, found := c.env.provider.FindStructType(structType); !found { - // This should not happen, anyway, report an error. 
- c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType) - return nil, false - } - - if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found { - return ft.Type, found - } - - c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName) - return nil, false -} - -func isWellKnownType(t *types.Type) bool { - switch t.Kind() { - case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind: - return true - case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind: - return t.IsAssignableType(types.NullType) - case types.ListKind: - return t.Parameters()[0] == types.DynType - case types.MapKind: - return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType - } - return false +func newIdentReference(name string, value *exprpb.Constant) *exprpb.Reference { + return &exprpb.Reference{Name: name, Value: value} } -func getWellKnownTypeName(t *types.Type) string { - if name, found := wellKnownTypes[t.Kind()]; found { - return name - } - return "" +func newFunctionReference(overloads ...string) *exprpb.Reference { + return &exprpb.Reference{OverloadId: overloads} } - -var ( - wellKnownTypes = map[types.Kind]string{ - types.AnyKind: "google.protobuf.Any", - types.BoolKind: "google.protobuf.BoolValue", - types.BytesKind: "google.protobuf.BytesValue", - types.DoubleKind: "google.protobuf.DoubleValue", - types.DurationKind: "google.protobuf.Duration", - types.DynKind: "google.protobuf.Value", - types.IntKind: "google.protobuf.Int64Value", - types.ListKind: "google.protobuf.ListValue", - types.NullTypeKind: "google.protobuf.NullValue", - types.MapKind: "google.protobuf.Struct", - types.StringKind: "google.protobuf.StringValue", - types.TimestampKind: "google.protobuf.Timestamp", - types.UintKind: "google.protobuf.UInt64Value", - } -) diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go index f232f30daca..ef58df766b0 100644 --- a/vendor/github.com/google/cel-go/checker/cost.go +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -18,9 +18,7 @@ import ( "math" "github.com/google/cel-go/common" - "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/parser" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -56,7 +54,7 @@ type AstNode interface { // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. Path() []string // Type returns the deduced type of the AstNode. - Type() *types.Type + Type() *exprpb.Type // Expr returns the expression of the AstNode. Expr() *exprpb.Expr // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. @@ -68,7 +66,7 @@ type AstNode interface { type astNode struct { path []string - t *types.Type + t *exprpb.Type expr *exprpb.Expr derivedSize *SizeEstimate } @@ -77,7 +75,7 @@ func (e astNode) Path() []string { return e.path } -func (e astNode) Type() *types.Type { +func (e astNode) Type() *exprpb.Type { return e.t } @@ -230,7 +228,7 @@ func addUint64NoOverflow(x, y uint64) uint64 { // multiplyUint64NoOverflow multiplies non-negative ints. If the result is exceeds math.MaxUint64, math.MaxUint64 // is returned. 
func multiplyUint64NoOverflow(x, y uint64) uint64 { - if y != 0 && x > math.MaxUint64/y { + if x > 0 && y > 0 && x > math.MaxUint64/y { return math.MaxUint64 } return x * y @@ -242,11 +240,7 @@ func multiplyByCostFactor(x uint64, y float64) uint64 { if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y { return math.MaxUint64 } - ceil := math.Ceil(xFloat * y) - if ceil >= doubleTwoTo64 { - return math.MaxUint64 - } - return uint64(ceil) + return uint64(math.Ceil(xFloat * y)) } var ( @@ -264,10 +258,9 @@ type coster struct { // iterRanges tracks the iterRange of each iterVar. iterRanges iterRangeScopes // computedSizes tracks the computed sizes of call results. - computedSizes map[int64]SizeEstimate - checkedAST *ast.CheckedAST - estimator CostEstimator - overloadEstimators map[string]FunctionEstimator + computedSizes map[int64]SizeEstimate + checkedExpr *exprpb.CheckedExpr + estimator CostEstimator // presenceTestCost will either be a zero or one based on whether has() macros count against cost computations. presenceTestCost CostEstimate } @@ -296,7 +289,6 @@ func (vs iterRangeScopes) peek(varName string) (int64, bool) { type CostOption func(*coster) error // PresenceTestHasCost determines whether presence testing has a cost of one or zero. -// // Defaults to presence test has a cost of one. func PresenceTestHasCost(hasCost bool) CostOption { return func(c *coster) error { @@ -309,30 +301,15 @@ func PresenceTestHasCost(hasCost bool) CostOption { } } -// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair. -type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate - -// OverloadCostEstimate binds a FunctionCoster to a specific function overload ID. -// -// When a OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to -// the Cost() call. -func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption { - return func(c *coster) error { - c.overloadEstimators[overloadID] = functionCoster - return nil - } -} - // Cost estimates the cost of the parsed and type checked CEL expression. 
-func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { +func Cost(checker *exprpb.CheckedExpr, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { c := &coster{ - checkedAST: checker, - estimator: estimator, - overloadEstimators: map[string]FunctionEstimator{}, - exprPath: map[int64][]string{}, - iterRanges: map[string][]int64{}, - computedSizes: map[int64]SizeEstimate{}, - presenceTestCost: CostEstimate{Min: 1, Max: 1}, + checkedExpr: checker, + estimator: estimator, + exprPath: map[int64][]string{}, + iterRanges: map[string][]int64{}, + computedSizes: map[int64]SizeEstimate{}, + presenceTestCost: CostEstimate{Min: 1, Max: 1}, } for _, opt := range opts { err := opt(c) @@ -340,7 +317,7 @@ func Cost(checker *ast.CheckedAST, estimator CostEstimator, opts ...CostOption) return CostEstimate{}, err } } - return c.cost(checker.Expr), nil + return c.cost(checker.GetExpr()), nil } func (c *coster) cost(e *exprpb.Expr) CostEstimate { @@ -374,10 +351,10 @@ func (c *coster) costIdent(e *exprpb.Expr) CostEstimate { // build and track the field path if iterRange, ok := c.iterRanges.peek(identExpr.GetName()); ok { - switch c.checkedAST.TypeMap[iterRange].Kind() { - case types.ListKind: + switch c.checkedExpr.TypeMap[iterRange].GetTypeKind().(type) { + case *exprpb.Type_ListType_: c.addPath(e, append(c.exprPath[iterRange], "@items")) - case types.MapKind: + case *exprpb.Type_MapType_: c.addPath(e, append(c.exprPath[iterRange], "@keys")) } } else { @@ -401,8 +378,8 @@ func (c *coster) costSelect(e *exprpb.Expr) CostEstimate { } sum = sum.Add(c.cost(sel.GetOperand())) targetType := c.getType(sel.GetOperand()) - switch targetType.Kind() { - case types.MapKind, types.StructKind, types.TypeParamKind: + switch kindOf(targetType) { + case kindMap, kindObject, kindTypeParam: sum = sum.Add(selectAndIdentCost) } @@ -426,8 +403,8 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { argTypes[i] = c.newAstNode(arg) } - ref := c.checkedAST.ReferenceMap[e.GetId()] - if ref == nil || len(ref.OverloadIDs) == 0 { + ref := c.checkedExpr.ReferenceMap[e.GetId()] + if ref == nil || len(ref.GetOverloadId()) == 0 { return CostEstimate{} } var targetType AstNode @@ -440,7 +417,7 @@ func (c *coster) costCall(e *exprpb.Expr) CostEstimate { // Pick a cost estimate range that covers all the overload cost estimation ranges fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} var resultSize *SizeEstimate - for _, overload := range ref.OverloadIDs { + for _, overload := range ref.GetOverloadId() { overloadCost := c.functionCost(call.GetFunction(), overload, &targetType, argTypes, argCosts) fnCost = fnCost.Union(overloadCost.CostEstimate) if overloadCost.ResultSize != nil { @@ -553,14 +530,7 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args } return sum } - if len(c.overloadEstimators) != 0 { - if estimator, found := c.overloadEstimators[overloadID]; found { - if est := estimator(c.estimator, target, args); est != nil { - callEst := *est - return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize} - } - } - } + if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil { callEst := *est return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize} @@ -671,8 +641,8 @@ func (c *coster) functionCost(function, overloadID string, target *AstNode, args return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} } -func (c 
*coster) getType(e *exprpb.Expr) *types.Type { - return c.checkedAST.TypeMap[e.GetId()] +func (c *coster) getType(e *exprpb.Expr) *exprpb.Type { + return c.checkedExpr.TypeMap[e.GetId()] } func (c *coster) getPath(e *exprpb.Expr) []string { @@ -693,24 +663,22 @@ func (c *coster) newAstNode(e *exprpb.Expr) *astNode { if size, ok := c.computedSizes[e.GetId()]; ok { derivedSize = &size } - return &astNode{ - path: path, - t: c.getType(e), - expr: e, - derivedSize: derivedSize} + return &astNode{path: path, t: c.getType(e), expr: e, derivedSize: derivedSize} } // isScalar returns true if the given type is known to be of a constant size at // compile time. isScalar will return false for strings (they are variable-width) // in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time). -func isScalar(t *types.Type) bool { - switch t.Kind() { - case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: - return true +func isScalar(t *exprpb.Type) bool { + switch kindOf(t) { + case kindPrimitive: + if t.GetPrimitive() != exprpb.Type_STRING && t.GetPrimitive() != exprpb.Type_BYTES { + return true + } + case kindWellKnown: + if t.GetWellKnown() == exprpb.Type_DURATION || t.GetWellKnown() == exprpb.Type_TIMESTAMP { + return true + } } return false } - -var ( - doubleTwoTo64 = math.Ldexp(1.0, 64) -) diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel index a6b0be292ce..9384be4507c 100644 --- a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel +++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel @@ -9,6 +9,7 @@ go_library( name = "go_default_library", srcs = [ "decls.go", + "scopes.go", ], importpath = "github.com/google/cel-go/checker/decls", deps = [ diff --git a/vendor/github.com/google/cel-go/checker/scopes.go b/vendor/github.com/google/cel-go/checker/decls/scopes.go similarity index 81% rename from vendor/github.com/google/cel-go/checker/scopes.go rename to vendor/github.com/google/cel-go/checker/decls/scopes.go index 8bb73ddb6a2..608bca3e537 100644 --- a/vendor/github.com/google/cel-go/checker/scopes.go +++ b/vendor/github.com/google/cel-go/checker/decls/scopes.go @@ -12,11 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package checker +package decls -import ( - "github.com/google/cel-go/common/decls" -) +import exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" // Scopes represents nested Decl sets where the Scopes value contains a Groups containing all // identifiers in scope and an optional parent representing outer scopes. @@ -27,9 +25,9 @@ type Scopes struct { scopes *Group } -// newScopes creates a new, empty Scopes. +// NewScopes creates a new, empty Scopes. // Some operations can't be safely performed until a Group is added with Push. -func newScopes() *Scopes { +func NewScopes() *Scopes { return &Scopes{ scopes: newGroup(), } @@ -37,7 +35,7 @@ func newScopes() *Scopes { // Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil. func (s *Scopes) Copy() *Scopes { - cpy := newScopes() + cpy := NewScopes() if s == nil { return cpy } @@ -68,14 +66,14 @@ func (s *Scopes) Pop() *Scopes { // AddIdent adds the ident Decl in the current scope. // Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. 
-func (s *Scopes) AddIdent(decl *decls.VariableDecl) { - s.scopes.idents[decl.Name()] = decl +func (s *Scopes) AddIdent(decl *exprpb.Decl) { + s.scopes.idents[decl.Name] = decl } // FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be // found. // Note: The search is performed from innermost to outermost. -func (s *Scopes) FindIdent(name string) *decls.VariableDecl { +func (s *Scopes) FindIdent(name string) *exprpb.Decl { if ident, found := s.scopes.idents[name]; found { return ident } @@ -88,7 +86,7 @@ func (s *Scopes) FindIdent(name string) *decls.VariableDecl { // FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or // nil if one does not exist. // Note: The search is only performed on the current scope and does not search outer scopes. -func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { +func (s *Scopes) FindIdentInScope(name string) *exprpb.Decl { if ident, found := s.scopes.idents[name]; found { return ident } @@ -97,14 +95,14 @@ func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { // SetFunction adds the function Decl to the current scope. // Note: Any previous entry for a function in the current scope with the same name is overwritten. -func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { - s.scopes.functions[fn.Name()] = fn +func (s *Scopes) SetFunction(fn *exprpb.Decl) { + s.scopes.functions[fn.Name] = fn } // FindFunction finds the first function Decl with a matching name in Scopes. // The search is performed from innermost to outermost. // Returns nil if no such function in Scopes. -func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { +func (s *Scopes) FindFunction(name string) *exprpb.Decl { if fn, found := s.scopes.functions[name]; found { return fn } @@ -118,16 +116,16 @@ func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { // Contains separate namespaces for identifier and function Decls. // (Should be named "Scope" perhaps?) type Group struct { - idents map[string]*decls.VariableDecl - functions map[string]*decls.FunctionDecl + idents map[string]*exprpb.Decl + functions map[string]*exprpb.Decl } // copy creates a new Group instance with a shallow copy of the variables and functions. // If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write. func (g *Group) copy() *Group { cpy := &Group{ - idents: make(map[string]*decls.VariableDecl, len(g.idents)), - functions: make(map[string]*decls.FunctionDecl, len(g.functions)), + idents: make(map[string]*exprpb.Decl, len(g.idents)), + functions: make(map[string]*exprpb.Decl, len(g.functions)), } for n, id := range g.idents { cpy.idents[n] = id @@ -141,7 +139,7 @@ func (g *Group) copy() *Group { // newGroup creates a new Group with empty maps for identifiers and functions. 
func newGroup() *Group { return &Group{ - idents: make(map[string]*decls.VariableDecl), - functions: make(map[string]*decls.FunctionDecl), + idents: make(map[string]*exprpb.Decl), + functions: make(map[string]*exprpb.Decl), } } diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go index 70682b17c6b..be89d2d68d7 100644 --- a/vendor/github.com/google/cel-go/checker/env.go +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -18,11 +18,17 @@ import ( "fmt" "strings" + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/decls" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/parser" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) type aggregateLiteralElementType int @@ -70,15 +76,15 @@ var ( // which can be used to assist with type-checking. type Env struct { container *containers.Container - provider types.Provider - declarations *Scopes + provider ref.TypeProvider + declarations *decls.Scopes aggLitElemType aggregateLiteralElementType filteredOverloadIDs map[string]struct{} } // NewEnv returns a new *Env with the given parameters. -func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) { - declarations := newScopes() +func NewEnv(container *containers.Container, provider ref.TypeProvider, opts ...Option) (*Env, error) { + declarations := decls.NewScopes() declarations.Push() envOptions := &options{} @@ -107,31 +113,24 @@ func NewEnv(container *containers.Container, provider types.Provider, opts ...Op }, nil } -// AddIdents configures the checker with a list of variable declarations. -// -// If there are overlapping declarations, the method will error. -func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error { - errMsgs := make([]errorMsg, 0) - for _, d := range declarations { - errMsgs = append(errMsgs, e.addIdent(d)) - } - return formatError(errMsgs) -} - -// AddFunctions configures the checker with a list of function declarations. -// -// If there are overlapping declarations, the method will error. -func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error { +// Add adds new Decl protos to the Env. +// Returns an error for identifier redeclarations. +func (e *Env) Add(decls ...*exprpb.Decl) error { errMsgs := make([]errorMsg, 0) - for _, d := range declarations { - errMsgs = append(errMsgs, e.setFunction(d)...) + for _, decl := range decls { + switch decl.DeclKind.(type) { + case *exprpb.Decl_Ident: + errMsgs = append(errMsgs, e.addIdent(sanitizeIdent(decl))) + case *exprpb.Decl_Function: + errMsgs = append(errMsgs, e.setFunction(sanitizeFunction(decl))...) + } } return formatError(errMsgs) } // LookupIdent returns a Decl proto for typeName as an identifier in the Env. // Returns nil if no such identifier is found in the Env. -func (e *Env) LookupIdent(name string) *decls.VariableDecl { +func (e *Env) LookupIdent(name string) *exprpb.Decl { for _, candidate := range e.container.ResolveCandidateNames(name) { if ident := e.declarations.FindIdent(candidate); ident != nil { return ident @@ -140,8 +139,8 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl { // Next try to import the name as a reference to a message type. 
If found, // the declaration is added to the outest (global) scope of the // environment, so next time we can access it faster. - if t, found := e.provider.FindStructType(candidate); found { - decl := decls.NewVariable(candidate, t) + if t, found := e.provider.FindType(candidate); found { + decl := decls.NewVar(candidate, t) e.declarations.AddIdent(decl) return decl } @@ -149,7 +148,11 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl { // Next try to import this as an enum value by splitting the name in a type prefix and // the enum inside. if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType { - decl := decls.NewConstant(candidate, types.IntType, enumValue) + decl := decls.NewIdent(candidate, + decls.Int, + &exprpb.Constant{ + ConstantKind: &exprpb.Constant_Int64Value{ + Int64Value: int64(enumValue.(types.Int))}}) e.declarations.AddIdent(decl) return decl } @@ -159,7 +162,7 @@ func (e *Env) LookupIdent(name string) *decls.VariableDecl { // LookupFunction returns a Decl proto for typeName as a function in env. // Returns nil if no such function is found in env. -func (e *Env) LookupFunction(name string) *decls.FunctionDecl { +func (e *Env) LookupFunction(name string) *exprpb.Decl { for _, candidate := range e.container.ResolveCandidateNames(name) { if fn := e.declarations.FindFunction(candidate); fn != nil { return fn @@ -168,46 +171,88 @@ func (e *Env) LookupFunction(name string) *decls.FunctionDecl { return nil } +// addOverload adds overload to function declaration f. +// Returns one or more errorMsg values if the overload overlaps with an existing overload or macro. +func (e *Env) addOverload(f *exprpb.Decl, overload *exprpb.Decl_FunctionDecl_Overload) []errorMsg { + errMsgs := make([]errorMsg, 0) + function := f.GetFunction() + emptyMappings := newMapping() + overloadFunction := decls.NewFunctionType(overload.GetResultType(), + overload.GetParams()...) + overloadErased := substitute(emptyMappings, overloadFunction, true) + for _, existing := range function.GetOverloads() { + existingFunction := decls.NewFunctionType(existing.GetResultType(), existing.GetParams()...) + existingErased := substitute(emptyMappings, existingFunction, true) + overlap := isAssignable(emptyMappings, overloadErased, existingErased) != nil || + isAssignable(emptyMappings, existingErased, overloadErased) != nil + if overlap && + overload.GetIsInstanceFunction() == existing.GetIsInstanceFunction() { + errMsgs = append(errMsgs, + overlappingOverloadError(f.Name, + overload.GetOverloadId(), overloadFunction, + existing.GetOverloadId(), existingFunction)) + } + } + + for _, macro := range parser.AllMacros { + if macro.Function() == f.Name && + macro.IsReceiverStyle() == overload.GetIsInstanceFunction() && + macro.ArgCount() == len(overload.GetParams()) { + errMsgs = append(errMsgs, overlappingMacroError(f.Name, macro.ArgCount())) + } + } + if len(errMsgs) > 0 { + return errMsgs + } + function.Overloads = append(function.GetOverloads(), overload) + return errMsgs +} + // setFunction adds the function Decl to the Env. // Adds a function decl if one doesn't already exist, then adds all overloads from the Decl. // If overload overlaps with an existing overload, adds to the errors in the Env instead. 
-func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg { - errMsgs := make([]errorMsg, 0) - current := e.declarations.FindFunction(fn.Name()) - if current != nil { - var err error - current, err = current.Merge(fn) - if err != nil { - return append(errMsgs, errorMsg(err.Error())) - } +func (e *Env) setFunction(decl *exprpb.Decl) []errorMsg { + errorMsgs := make([]errorMsg, 0) + overloads := decl.GetFunction().GetOverloads() + current := e.declarations.FindFunction(decl.Name) + if current == nil { + //Add the function declaration without overloads and check the overloads below. + current = decls.NewFunction(decl.Name) } else { - current = fn - } - for _, overload := range current.OverloadDecls() { - for _, macro := range parser.AllMacros { - if macro.Function() == current.Name() && - macro.IsReceiverStyle() == overload.IsMemberFunction() && - macro.ArgCount() == len(overload.ArgTypes()) { - errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount())) + existingOverloads := map[string]*exprpb.Decl_FunctionDecl_Overload{} + for _, overload := range current.GetFunction().GetOverloads() { + existingOverloads[overload.GetOverloadId()] = overload + } + newOverloads := []*exprpb.Decl_FunctionDecl_Overload{} + for _, overload := range overloads { + existing, found := existingOverloads[overload.GetOverloadId()] + if !found || !overloadsEqual(existing, overload) { + newOverloads = append(newOverloads, overload) } } - if len(errMsgs) > 0 { - return errMsgs + overloads = newOverloads + if len(newOverloads) == 0 { + return errorMsgs } + // Copy on write since we don't know where this original definition came from. + current = proto.Clone(current).(*exprpb.Decl) } e.declarations.SetFunction(current) - return errMsgs + for _, overload := range overloads { + errorMsgs = append(errorMsgs, e.addOverload(current, overload)...) + } + return errorMsgs } // addIdent adds the Decl to the declarations in the Env. // Returns a non-empty errorMsg if the identifier is already declared in the scope. -func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg { - current := e.declarations.FindIdentInScope(decl.Name()) +func (e *Env) addIdent(decl *exprpb.Decl) errorMsg { + current := e.declarations.FindIdentInScope(decl.Name) if current != nil { - if current.DeclarationIsEquivalent(decl) { + if proto.Equal(current, decl) { return "" } - return overlappingIdentifierError(decl.Name()) + return overlappingIdentifierError(decl.Name) } e.declarations.AddIdent(decl) return "" @@ -219,9 +264,111 @@ func (e *Env) isOverloadDisabled(overloadID string) bool { return found } +// overloadsEqual returns whether two overloads have identical signatures. +// +// type parameter names are ignored as they may be specified in any order and have no bearing on overload +// equivalence +func overloadsEqual(o1, o2 *exprpb.Decl_FunctionDecl_Overload) bool { + return o1.GetOverloadId() == o2.GetOverloadId() && + o1.GetIsInstanceFunction() == o2.GetIsInstanceFunction() && + paramsEqual(o1.GetParams(), o2.GetParams()) && + proto.Equal(o1.GetResultType(), o2.GetResultType()) +} + +// paramsEqual returns whether two lists have equal length and all types are equal +func paramsEqual(p1, p2 []*exprpb.Type) bool { + if len(p1) != len(p2) { + return false + } + for i, a := range p1 { + b := p2[i] + if !proto.Equal(a, b) { + return false + } + } + return true +} + +// sanitizeFunction replaces well-known types referenced by message name with their equivalent +// CEL built-in type instances. 
+func sanitizeFunction(decl *exprpb.Decl) *exprpb.Decl { + fn := decl.GetFunction() + // Determine whether the declaration requires replacements from proto-based message type + // references to well-known CEL type references. + var needsSanitizing bool + for _, o := range fn.GetOverloads() { + if isObjectWellKnownType(o.GetResultType()) { + needsSanitizing = true + break + } + for _, p := range o.GetParams() { + if isObjectWellKnownType(p) { + needsSanitizing = true + break + } + } + } + + // Early return if the declaration requires no modification. + if !needsSanitizing { + return decl + } + + // Sanitize all of the overloads if any overload requires an update to its type references. + overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(fn.GetOverloads())) + for i, o := range fn.GetOverloads() { + rt := o.GetResultType() + if isObjectWellKnownType(rt) { + rt = getObjectWellKnownType(rt) + } + params := make([]*exprpb.Type, len(o.GetParams())) + copy(params, o.GetParams()) + for j, p := range params { + if isObjectWellKnownType(p) { + params[j] = getObjectWellKnownType(p) + } + } + // If sanitized, replace the overload definition. + if o.IsInstanceFunction { + overloads[i] = + decls.NewInstanceOverload(o.GetOverloadId(), params, rt) + } else { + overloads[i] = + decls.NewOverload(o.GetOverloadId(), params, rt) + } + } + return decls.NewFunction(decl.GetName(), overloads...) +} + +// sanitizeIdent replaces the identifier's well-known types referenced by message name with +// references to CEL built-in type instances. +func sanitizeIdent(decl *exprpb.Decl) *exprpb.Decl { + id := decl.GetIdent() + t := id.GetType() + if !isObjectWellKnownType(t) { + return decl + } + return decls.NewIdent(decl.GetName(), getObjectWellKnownType(t), id.GetValue()) +} + +// isObjectWellKnownType returns true if the input type is an OBJECT type with a message name +// that corresponds the message name of a built-in CEL type. +func isObjectWellKnownType(t *exprpb.Type) bool { + if kindOf(t) != kindObject { + return false + } + _, found := pb.CheckedWellKnowns[t.GetMessageType()] + return found +} + +// getObjectWellKnownType returns the built-in CEL type declaration for input type's message name. +func getObjectWellKnownType(t *exprpb.Type) *exprpb.Type { + return pb.CheckedWellKnowns[t.GetMessageType()] +} + // validatedDeclarations returns a reference to the validated variable and function declaration scope stack. // must be copied before use. 
-func (e *Env) validatedDeclarations() *Scopes { +func (e *Env) validatedDeclarations() *decls.Scopes { return e.declarations } @@ -255,6 +402,19 @@ func overlappingIdentifierError(name string) errorMsg { return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) } +func overlappingOverloadError(name string, + overloadID1 string, f1 *exprpb.Type, + overloadID2 string, f2 *exprpb.Type) errorMsg { + return errorMsg(fmt.Sprintf( + "overlapping overload for name '%s' (type '%s' with overloadId: '%s' "+ + "cannot be distinguished from '%s' with overloadId: '%s')", + name, + FormatCheckedType(f1), + overloadID1, + FormatCheckedType(f2), + overloadID2)) +} + func overlappingMacroError(name string, argCount int) errorMsg { return errorMsg(fmt.Sprintf( "overlapping macro for name '%s' with %d args", name, argCount)) diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go index c2b96498d1d..0014f9abe1c 100644 --- a/vendor/github.com/google/cel-go/checker/errors.go +++ b/vendor/github.com/google/cel-go/checker/errors.go @@ -15,78 +15,82 @@ package checker import ( - "reflect" - "github.com/google/cel-go/common" - "github.com/google/cel-go/common/ast" - "github.com/google/cel-go/common/types" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // typeErrors is a specialization of Errors. type typeErrors struct { - errs *common.Errors -} - -func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) { - e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'", - name, FormatCELType(field), FormatCELType(value)) -} - -func (e *typeErrors) incompatibleType(id int64, l common.Location, ex *exprpb.Expr, prev, next *types.Type) { - e.errs.ReportErrorAtID(id, l, - "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) -} - -func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { - signature := formatFunctionDeclType(nil, args, isInstance) - e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature) + *common.Errors } -func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) { - e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", - FormatCELType(t)) +func (e *typeErrors) undeclaredReference(l common.Location, container string, name string) { + e.ReportError(l, "undeclared reference to '%s' (in container '%s')", name, container) } -func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field *exprpb.Expr) { - e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) +func (e *typeErrors) typeDoesNotSupportFieldSelection(l common.Location, t *exprpb.Type) { + e.ReportError(l, "type '%s' does not support field selection", t) } -func (e *typeErrors) notAType(id int64, l common.Location, typeName string) { - e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName) +func (e *typeErrors) undefinedField(l common.Location, field string) { + e.ReportError(l, "undefined field '%s'", field) } -func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) { - e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) +func (e *typeErrors) noMatchingOverload(l common.Location, name string, args []*exprpb.Type, 
isInstance bool) { + signature := formatFunction(nil, args, isInstance) + e.ReportError(l, "found no matching overload for '%s' applied to '%s'", name, signature) } -func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex *exprpb.Expr, prev, next *ast.ReferenceInfo) { - e.errs.ReportErrorAtID(id, l, - "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.GetId(), prev, next) +func (e *typeErrors) notAType(l common.Location, t *exprpb.Type) { + e.ReportError(l, "'%s(%v)' is not a type", FormatCheckedType(t), t) } -func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { - e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t)) +func (e *typeErrors) notAMessageType(l common.Location, t *exprpb.Type) { + e.ReportError(l, "'%s' is not a message type", FormatCheckedType(t)) } -func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) { - e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'", - FormatCELType(expected), FormatCELType(actual)) +func (e *typeErrors) fieldTypeMismatch(l common.Location, name string, field *exprpb.Type, value *exprpb.Type) { + e.ReportError(l, "expected type of field '%s' is '%s' but provided type is '%s'", + name, FormatCheckedType(field), FormatCheckedType(value)) } -func (e *typeErrors) undefinedField(id int64, l common.Location, field string) { - e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field) +func (e *typeErrors) unexpectedFailedResolution(l common.Location, typeName string) { + e.ReportError(l, "[internal] unexpected failed resolution of '%s'", typeName) } -func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) { - e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container) +func (e *typeErrors) notAComprehensionRange(l common.Location, t *exprpb.Type) { + e.ReportError(l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", + FormatCheckedType(t)) } -func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) { - e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) +func (e *typeErrors) typeMismatch(l common.Location, expected *exprpb.Type, actual *exprpb.Type) { + e.ReportError(l, "expected type '%s' but found '%s'", + FormatCheckedType(expected), FormatCheckedType(actual)) } -func (e *typeErrors) unexpectedASTType(id int64, l common.Location, ex *exprpb.Expr) { - e.errs.ReportErrorAtID(id, l, "unrecognized ast type: %v", reflect.TypeOf(ex)) +func formatFunction(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { + result := "" + if isInstance { + target := argTypes[0] + argTypes = argTypes[1:] + + result += FormatCheckedType(target) + result += "." 
+ } + + result += "(" + for i, arg := range argTypes { + if i > 0 { + result += ", " + } + result += FormatCheckedType(arg) + } + result += ")" + if resultType != nil { + result += " -> " + result += FormatCheckedType(resultType) + } + + return result } diff --git a/vendor/github.com/google/cel-go/checker/format.go b/vendor/github.com/google/cel-go/checker/format.go deleted file mode 100644 index 95842905e6d..00000000000 --- a/vendor/github.com/google/cel-go/checker/format.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package checker - -import ( - "fmt" - "strings" - - chkdecls "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/types" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -const ( - kindUnknown = iota + 1 - kindError - kindFunction - kindDyn - kindPrimitive - kindWellKnown - kindWrapper - kindNull - kindAbstract - kindType - kindList - kindMap - kindObject - kindTypeParam -) - -// FormatCheckedType converts a type message into a string representation. -func FormatCheckedType(t *exprpb.Type) string { - switch kindOf(t) { - case kindDyn: - return "dyn" - case kindFunction: - return formatFunctionExprType(t.GetFunction().GetResultType(), - t.GetFunction().GetArgTypes(), - false) - case kindList: - return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) - case kindObject: - return t.GetMessageType() - case kindMap: - return fmt.Sprintf("map(%s, %s)", - FormatCheckedType(t.GetMapType().GetKeyType()), - FormatCheckedType(t.GetMapType().GetValueType())) - case kindNull: - return "null" - case kindPrimitive: - switch t.GetPrimitive() { - case exprpb.Type_UINT64: - return "uint" - case exprpb.Type_INT64: - return "int" - } - return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") - case kindType: - if t.GetType() == nil || t.GetType().GetTypeKind() == nil { - return "type" - } - return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) - case kindWellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return "any" - case exprpb.Type_DURATION: - return "duration" - case exprpb.Type_TIMESTAMP: - return "timestamp" - } - case kindWrapper: - return fmt.Sprintf("wrapper(%s)", - FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper()))) - case kindError: - return "!error!" - case kindTypeParam: - return t.GetTypeParam() - case kindAbstract: - at := t.GetAbstractType() - params := at.GetParameterTypes() - paramStrs := make([]string, len(params)) - for i, p := range params { - paramStrs[i] = FormatCheckedType(p) - } - return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) - } - return t.String() -} - -type formatter func(any) string - -// FormatCELType formats a types.Type value to a string representation. -// -// The type formatting is identical to FormatCheckedType. 
-func FormatCELType(t any) string { - dt := t.(*types.Type) - switch dt.Kind() { - case types.AnyKind: - return "any" - case types.DurationKind: - return "duration" - case types.ErrorKind: - return "!error!" - case types.NullTypeKind: - return "null" - case types.TimestampKind: - return "timestamp" - case types.TypeParamKind: - return dt.TypeName() - case types.OpaqueKind: - if dt.TypeName() == "function" { - // There is no explicit function type in the new types representation, so information like - // whether the function is a member function is absent. - return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false) - } - case types.UnspecifiedKind: - return "" - } - if len(dt.Parameters()) == 0 { - return dt.DeclaredTypeName() - } - paramTypeNames := make([]string, 0, len(dt.Parameters())) - for _, p := range dt.Parameters() { - paramTypeNames = append(paramTypeNames, FormatCELType(p)) - } - return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", ")) -} - -func formatExprType(t any) string { - if t == nil { - return "" - } - return FormatCheckedType(t.(*exprpb.Type)) -} - -func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { - return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType) -} - -func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string { - return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType) -} - -func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string { - result := "" - if isInstance { - target := argTypes[0] - argTypes = argTypes[1:] - result += format(target) - result += "." - } - result += "(" - for i, arg := range argTypes { - if i > 0 { - result += ", " - } - result += format(arg) - } - result += ")" - rt := format(resultType) - if rt != "" { - result += " -> " - result += rt - } - return result -} - -// kindOf returns the kind of the type as defined in the checked.proto. 
-func kindOf(t *exprpb.Type) int { - if t == nil || t.TypeKind == nil { - return kindUnknown - } - switch t.GetTypeKind().(type) { - case *exprpb.Type_Error: - return kindError - case *exprpb.Type_Function: - return kindFunction - case *exprpb.Type_Dyn: - return kindDyn - case *exprpb.Type_Primitive: - return kindPrimitive - case *exprpb.Type_WellKnown: - return kindWellKnown - case *exprpb.Type_Wrapper: - return kindWrapper - case *exprpb.Type_Null: - return kindNull - case *exprpb.Type_Type: - return kindType - case *exprpb.Type_ListType_: - return kindList - case *exprpb.Type_MapType_: - return kindMap - case *exprpb.Type_MessageType: - return kindObject - case *exprpb.Type_TypeParam: - return kindTypeParam - case *exprpb.Type_AbstractType_: - return kindAbstract - } - return kindUnknown -} diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go index 8163a908a5f..fbc55a28d9d 100644 --- a/vendor/github.com/google/cel-go/checker/mapping.go +++ b/vendor/github.com/google/cel-go/checker/mapping.go @@ -15,25 +15,25 @@ package checker import ( - "github.com/google/cel-go/common/types" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) type mapping struct { - mapping map[string]*types.Type + mapping map[string]*exprpb.Type } func newMapping() *mapping { return &mapping{ - mapping: make(map[string]*types.Type), + mapping: make(map[string]*exprpb.Type), } } -func (m *mapping) add(from, to *types.Type) { - m.mapping[FormatCELType(from)] = to +func (m *mapping) add(from *exprpb.Type, to *exprpb.Type) { + m.mapping[typeKey(from)] = to } -func (m *mapping) find(from *types.Type) (*types.Type, bool) { - if r, found := m.mapping[FormatCELType(from)]; found { +func (m *mapping) find(from *exprpb.Type) (*exprpb.Type, bool) { + if r, found := m.mapping[typeKey(from)]; found { return r, found } return nil, false diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go index 0560c3813cf..cded00a660d 100644 --- a/vendor/github.com/google/cel-go/checker/options.go +++ b/vendor/github.com/google/cel-go/checker/options.go @@ -14,10 +14,12 @@ package checker +import "github.com/google/cel-go/checker/decls" + type options struct { crossTypeNumericComparisons bool homogeneousAggregateLiterals bool - validatedDeclarations *Scopes + validatedDeclarations *decls.Scopes } // Option is a functional option for configuring the type-checker @@ -32,6 +34,15 @@ func CrossTypeNumericComparisons(enabled bool) Option { } } +// HomogeneousAggregateLiterals toggles support for constructing lists and maps whose elements all +// have the same type. +func HomogeneousAggregateLiterals(enabled bool) Option { + return func(opts *options) error { + opts.homogeneousAggregateLiterals = enabled + return nil + } +} + // ValidatedDeclarations provides a references to validated declarations which will be copied // into new checker instances. 
func ValidatedDeclarations(env *Env) Option { diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go index 15cba06ee97..0cecc5210dd 100644 --- a/vendor/github.com/google/cel-go/checker/printer.go +++ b/vendor/github.com/google/cel-go/checker/printer.go @@ -15,8 +15,6 @@ package checker import ( - "sort" - "github.com/google/cel-go/common/debug" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" @@ -49,7 +47,6 @@ func (a *semanticAdorner) GetMetadata(elem any) string { if len(ref.GetOverloadId()) == 0 { result += "^" + ref.Name } else { - sort.Strings(ref.GetOverloadId()) for i, overload := range ref.GetOverloadId() { if i == 0 { result += "^" diff --git a/vendor/github.com/google/cel-go/checker/standard.go b/vendor/github.com/google/cel-go/checker/standard.go index 11b35b80ee2..e64337ba44a 100644 --- a/vendor/github.com/google/cel-go/checker/standard.go +++ b/vendor/github.com/google/cel-go/checker/standard.go @@ -15,21 +15,480 @@ package checker import ( - "github.com/google/cel-go/common/stdlib" + "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) -// StandardFunctions returns the Decls for all functions in the evaluator. -// -// Deprecated: prefer stdlib.FunctionExprDecls() -func StandardFunctions() []*exprpb.Decl { - return stdlib.FunctionExprDecls() +var ( + standardDeclarations []*exprpb.Decl +) + +func init() { + // Some shortcuts we use when building declarations. + paramA := decls.NewTypeParamType("A") + typeParamAList := []string{"A"} + listOfA := decls.NewListType(paramA) + paramB := decls.NewTypeParamType("B") + typeParamABList := []string{"A", "B"} + mapOfAB := decls.NewMapType(paramA, paramB) + + var idents []*exprpb.Decl + for _, t := range []*exprpb.Type{ + decls.Int, decls.Uint, decls.Bool, + decls.Double, decls.Bytes, decls.String} { + idents = append(idents, + decls.NewVar(FormatCheckedType(t), decls.NewTypeType(t))) + } + idents = append(idents, + decls.NewVar("list", decls.NewTypeType(listOfA)), + decls.NewVar("map", decls.NewTypeType(mapOfAB)), + decls.NewVar("null_type", decls.NewTypeType(decls.Null)), + decls.NewVar("type", decls.NewTypeType(decls.NewTypeType(nil)))) + + standardDeclarations = append(standardDeclarations, idents...) 
+ standardDeclarations = append(standardDeclarations, []*exprpb.Decl{ + // Booleans + decls.NewFunction(operators.Conditional, + decls.NewParameterizedOverload(overloads.Conditional, + []*exprpb.Type{decls.Bool, paramA, paramA}, paramA, + typeParamAList)), + + decls.NewFunction(operators.LogicalAnd, + decls.NewOverload(overloads.LogicalAnd, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), + + decls.NewFunction(operators.LogicalOr, + decls.NewOverload(overloads.LogicalOr, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool)), + + decls.NewFunction(operators.LogicalNot, + decls.NewOverload(overloads.LogicalNot, + []*exprpb.Type{decls.Bool}, decls.Bool)), + + decls.NewFunction(operators.NotStrictlyFalse, + decls.NewOverload(overloads.NotStrictlyFalse, + []*exprpb.Type{decls.Bool}, decls.Bool)), + + decls.NewFunction(operators.Equals, + decls.NewParameterizedOverload(overloads.Equals, + []*exprpb.Type{paramA, paramA}, decls.Bool, + typeParamAList)), + + decls.NewFunction(operators.NotEquals, + decls.NewParameterizedOverload(overloads.NotEquals, + []*exprpb.Type{paramA, paramA}, decls.Bool, + typeParamAList)), + + // Algebra. + + decls.NewFunction(operators.Subtract, + decls.NewOverload(overloads.SubtractInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Int), + decls.NewOverload(overloads.SubtractUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), + decls.NewOverload(overloads.SubtractDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Double), + decls.NewOverload(overloads.SubtractTimestampTimestamp, + []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Duration), + decls.NewOverload(overloads.SubtractTimestampDuration, + []*exprpb.Type{decls.Timestamp, decls.Duration}, decls.Timestamp), + decls.NewOverload(overloads.SubtractDurationDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), + + decls.NewFunction(operators.Multiply, + decls.NewOverload(overloads.MultiplyInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Int), + decls.NewOverload(overloads.MultiplyUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), + decls.NewOverload(overloads.MultiplyDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), + + decls.NewFunction(operators.Divide, + decls.NewOverload(overloads.DivideInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Int), + decls.NewOverload(overloads.DivideUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), + decls.NewOverload(overloads.DivideDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Double)), + + decls.NewFunction(operators.Modulo, + decls.NewOverload(overloads.ModuloInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Int), + decls.NewOverload(overloads.ModuloUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint)), + + decls.NewFunction(operators.Add, + decls.NewOverload(overloads.AddInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Int), + decls.NewOverload(overloads.AddUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Uint), + decls.NewOverload(overloads.AddDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Double), + decls.NewOverload(overloads.AddString, + []*exprpb.Type{decls.String, decls.String}, decls.String), + decls.NewOverload(overloads.AddBytes, + []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bytes), + decls.NewParameterizedOverload(overloads.AddList, + []*exprpb.Type{listOfA, listOfA}, listOfA, + typeParamAList), + decls.NewOverload(overloads.AddTimestampDuration, + []*exprpb.Type{decls.Timestamp, decls.Duration}, 
decls.Timestamp), + decls.NewOverload(overloads.AddDurationTimestamp, + []*exprpb.Type{decls.Duration, decls.Timestamp}, decls.Timestamp), + decls.NewOverload(overloads.AddDurationDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Duration)), + + decls.NewFunction(operators.Negate, + decls.NewOverload(overloads.NegateInt64, + []*exprpb.Type{decls.Int}, decls.Int), + decls.NewOverload(overloads.NegateDouble, + []*exprpb.Type{decls.Double}, decls.Double)), + + // Index. + + decls.NewFunction(operators.Index, + decls.NewParameterizedOverload(overloads.IndexList, + []*exprpb.Type{listOfA, decls.Int}, paramA, + typeParamAList), + decls.NewParameterizedOverload(overloads.IndexMap, + []*exprpb.Type{mapOfAB, paramA}, paramB, + typeParamABList)), + + // Collections. + + decls.NewFunction(overloads.Size, + decls.NewInstanceOverload(overloads.SizeStringInst, + []*exprpb.Type{decls.String}, decls.Int), + decls.NewInstanceOverload(overloads.SizeBytesInst, + []*exprpb.Type{decls.Bytes}, decls.Int), + decls.NewParameterizedInstanceOverload(overloads.SizeListInst, + []*exprpb.Type{listOfA}, decls.Int, typeParamAList), + decls.NewParameterizedInstanceOverload(overloads.SizeMapInst, + []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList), + decls.NewOverload(overloads.SizeString, + []*exprpb.Type{decls.String}, decls.Int), + decls.NewOverload(overloads.SizeBytes, + []*exprpb.Type{decls.Bytes}, decls.Int), + decls.NewParameterizedOverload(overloads.SizeList, + []*exprpb.Type{listOfA}, decls.Int, typeParamAList), + decls.NewParameterizedOverload(overloads.SizeMap, + []*exprpb.Type{mapOfAB}, decls.Int, typeParamABList)), + + decls.NewFunction(operators.In, + decls.NewParameterizedOverload(overloads.InList, + []*exprpb.Type{paramA, listOfA}, decls.Bool, + typeParamAList), + decls.NewParameterizedOverload(overloads.InMap, + []*exprpb.Type{paramA, mapOfAB}, decls.Bool, + typeParamABList)), + + // Deprecated 'in()' function. + + decls.NewFunction(overloads.DeprecatedIn, + decls.NewParameterizedOverload(overloads.InList, + []*exprpb.Type{paramA, listOfA}, decls.Bool, + typeParamAList), + decls.NewParameterizedOverload(overloads.InMap, + []*exprpb.Type{paramA, mapOfAB}, decls.Bool, + typeParamABList)), + + // Conversions to type. + + decls.NewFunction(overloads.TypeConvertType, + decls.NewParameterizedOverload(overloads.TypeConvertType, + []*exprpb.Type{paramA}, decls.NewTypeType(paramA), typeParamAList)), + + // Conversions to int. + + decls.NewFunction(overloads.TypeConvertInt, + decls.NewOverload(overloads.IntToInt, []*exprpb.Type{decls.Int}, decls.Int), + decls.NewOverload(overloads.UintToInt, []*exprpb.Type{decls.Uint}, decls.Int), + decls.NewOverload(overloads.DoubleToInt, []*exprpb.Type{decls.Double}, decls.Int), + decls.NewOverload(overloads.StringToInt, []*exprpb.Type{decls.String}, decls.Int), + decls.NewOverload(overloads.TimestampToInt, []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewOverload(overloads.DurationToInt, []*exprpb.Type{decls.Duration}, decls.Int)), + + // Conversions to uint. + + decls.NewFunction(overloads.TypeConvertUint, + decls.NewOverload(overloads.UintToUint, []*exprpb.Type{decls.Uint}, decls.Uint), + decls.NewOverload(overloads.IntToUint, []*exprpb.Type{decls.Int}, decls.Uint), + decls.NewOverload(overloads.DoubleToUint, []*exprpb.Type{decls.Double}, decls.Uint), + decls.NewOverload(overloads.StringToUint, []*exprpb.Type{decls.String}, decls.Uint)), + + // Conversions to double. 
+ + decls.NewFunction(overloads.TypeConvertDouble, + decls.NewOverload(overloads.DoubleToDouble, []*exprpb.Type{decls.Double}, decls.Double), + decls.NewOverload(overloads.IntToDouble, []*exprpb.Type{decls.Int}, decls.Double), + decls.NewOverload(overloads.UintToDouble, []*exprpb.Type{decls.Uint}, decls.Double), + decls.NewOverload(overloads.StringToDouble, []*exprpb.Type{decls.String}, decls.Double)), + + // Conversions to bool. + + decls.NewFunction(overloads.TypeConvertBool, + decls.NewOverload(overloads.BoolToBool, []*exprpb.Type{decls.Bool}, decls.Bool), + decls.NewOverload(overloads.StringToBool, []*exprpb.Type{decls.String}, decls.Bool)), + + // Conversions to string. + + decls.NewFunction(overloads.TypeConvertString, + decls.NewOverload(overloads.StringToString, []*exprpb.Type{decls.String}, decls.String), + decls.NewOverload(overloads.BoolToString, []*exprpb.Type{decls.Bool}, decls.String), + decls.NewOverload(overloads.IntToString, []*exprpb.Type{decls.Int}, decls.String), + decls.NewOverload(overloads.UintToString, []*exprpb.Type{decls.Uint}, decls.String), + decls.NewOverload(overloads.DoubleToString, []*exprpb.Type{decls.Double}, decls.String), + decls.NewOverload(overloads.BytesToString, []*exprpb.Type{decls.Bytes}, decls.String), + decls.NewOverload(overloads.TimestampToString, []*exprpb.Type{decls.Timestamp}, decls.String), + decls.NewOverload(overloads.DurationToString, []*exprpb.Type{decls.Duration}, decls.String)), + + // Conversions to bytes. + + decls.NewFunction(overloads.TypeConvertBytes, + decls.NewOverload(overloads.BytesToBytes, []*exprpb.Type{decls.Bytes}, decls.Bytes), + decls.NewOverload(overloads.StringToBytes, []*exprpb.Type{decls.String}, decls.Bytes)), + + // Conversions to timestamps. + + decls.NewFunction(overloads.TypeConvertTimestamp, + decls.NewOverload(overloads.TimestampToTimestamp, + []*exprpb.Type{decls.Timestamp}, decls.Timestamp), + decls.NewOverload(overloads.StringToTimestamp, + []*exprpb.Type{decls.String}, decls.Timestamp), + decls.NewOverload(overloads.IntToTimestamp, + []*exprpb.Type{decls.Int}, decls.Timestamp)), + + // Conversions to durations. + + decls.NewFunction(overloads.TypeConvertDuration, + decls.NewOverload(overloads.DurationToDuration, + []*exprpb.Type{decls.Duration}, decls.Duration), + decls.NewOverload(overloads.StringToDuration, + []*exprpb.Type{decls.String}, decls.Duration), + decls.NewOverload(overloads.IntToDuration, + []*exprpb.Type{decls.Int}, decls.Duration)), + + // Conversions to Dyn. + + decls.NewFunction(overloads.TypeConvertDyn, + decls.NewParameterizedOverload(overloads.ToDyn, + []*exprpb.Type{paramA}, decls.Dyn, + typeParamAList)), + + // String functions. + + decls.NewFunction(overloads.Contains, + decls.NewInstanceOverload(overloads.ContainsString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool)), + decls.NewFunction(overloads.EndsWith, + decls.NewInstanceOverload(overloads.EndsWithString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool)), + decls.NewFunction(overloads.Matches, + decls.NewOverload(overloads.Matches, + []*exprpb.Type{decls.String, decls.String}, decls.Bool), + decls.NewInstanceOverload(overloads.MatchesString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool)), + decls.NewFunction(overloads.StartsWith, + decls.NewInstanceOverload(overloads.StartsWithString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool)), + + // Date/time functions. 
+ + decls.NewFunction(overloads.TimeGetFullYear, + decls.NewInstanceOverload(overloads.TimestampToYear, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToYearWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetMonth, + decls.NewInstanceOverload(overloads.TimestampToMonth, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToMonthWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetDayOfYear, + decls.NewInstanceOverload(overloads.TimestampToDayOfYear, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToDayOfYearWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetDayOfMonth, + decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBased, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetDate, + decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBased, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetDayOfWeek, + decls.NewInstanceOverload(overloads.TimestampToDayOfWeek, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToDayOfWeekWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int)), + + decls.NewFunction(overloads.TimeGetHours, + decls.NewInstanceOverload(overloads.TimestampToHours, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToHoursWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), + decls.NewInstanceOverload(overloads.DurationToHours, + []*exprpb.Type{decls.Duration}, decls.Int)), + + decls.NewFunction(overloads.TimeGetMinutes, + decls.NewInstanceOverload(overloads.TimestampToMinutes, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToMinutesWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), + decls.NewInstanceOverload(overloads.DurationToMinutes, + []*exprpb.Type{decls.Duration}, decls.Int)), + + decls.NewFunction(overloads.TimeGetSeconds, + decls.NewInstanceOverload(overloads.TimestampToSeconds, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToSecondsWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), + decls.NewInstanceOverload(overloads.DurationToSeconds, + []*exprpb.Type{decls.Duration}, decls.Int)), + + decls.NewFunction(overloads.TimeGetMilliseconds, + decls.NewInstanceOverload(overloads.TimestampToMilliseconds, + []*exprpb.Type{decls.Timestamp}, decls.Int), + decls.NewInstanceOverload(overloads.TimestampToMillisecondsWithTz, + []*exprpb.Type{decls.Timestamp, decls.String}, decls.Int), + decls.NewInstanceOverload(overloads.DurationToMilliseconds, + []*exprpb.Type{decls.Duration}, decls.Int)), + + // Relations. 
+ decls.NewFunction(operators.Less, + decls.NewOverload(overloads.LessBool, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), + decls.NewOverload(overloads.LessInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessInt64Double, + []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessInt64Uint64, + []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessUint64Double, + []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessUint64Int64, + []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessDoubleInt64, + []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessDoubleUint64, + []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool), + decls.NewOverload(overloads.LessBytes, + []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), + decls.NewOverload(overloads.LessTimestamp, + []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), + decls.NewOverload(overloads.LessDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), + + decls.NewFunction(operators.LessEquals, + decls.NewOverload(overloads.LessEqualsBool, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), + decls.NewOverload(overloads.LessEqualsInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessEqualsInt64Double, + []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessEqualsInt64Uint64, + []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessEqualsUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessEqualsUint64Double, + []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessEqualsUint64Int64, + []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessEqualsDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), + decls.NewOverload(overloads.LessEqualsDoubleInt64, + []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), + decls.NewOverload(overloads.LessEqualsDoubleUint64, + []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.LessEqualsString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool), + decls.NewOverload(overloads.LessEqualsBytes, + []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), + decls.NewOverload(overloads.LessEqualsTimestamp, + []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), + decls.NewOverload(overloads.LessEqualsDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), + + decls.NewFunction(operators.Greater, + decls.NewOverload(overloads.GreaterBool, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), + decls.NewOverload(overloads.GreaterInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterInt64Double, + []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterInt64Uint64, + []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.GreaterUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), 
+ decls.NewOverload(overloads.GreaterUint64Double, + []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterUint64Int64, + []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterDoubleInt64, + []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterDoubleUint64, + []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.GreaterString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool), + decls.NewOverload(overloads.GreaterBytes, + []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), + decls.NewOverload(overloads.GreaterTimestamp, + []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), + decls.NewOverload(overloads.GreaterDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), + + decls.NewFunction(operators.GreaterEquals, + decls.NewOverload(overloads.GreaterEqualsBool, + []*exprpb.Type{decls.Bool, decls.Bool}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsInt64, + []*exprpb.Type{decls.Int, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsInt64Double, + []*exprpb.Type{decls.Int, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsInt64Uint64, + []*exprpb.Type{decls.Int, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsUint64, + []*exprpb.Type{decls.Uint, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsUint64Double, + []*exprpb.Type{decls.Uint, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsUint64Int64, + []*exprpb.Type{decls.Uint, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsDouble, + []*exprpb.Type{decls.Double, decls.Double}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsDoubleInt64, + []*exprpb.Type{decls.Double, decls.Int}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsDoubleUint64, + []*exprpb.Type{decls.Double, decls.Uint}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsString, + []*exprpb.Type{decls.String, decls.String}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsBytes, + []*exprpb.Type{decls.Bytes, decls.Bytes}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsTimestamp, + []*exprpb.Type{decls.Timestamp, decls.Timestamp}, decls.Bool), + decls.NewOverload(overloads.GreaterEqualsDuration, + []*exprpb.Type{decls.Duration, decls.Duration}, decls.Bool)), + }...) } -// StandardTypes returns the set of type identifiers for standard library types. -// -// Deprecated: prefer stdlib.TypeExprDecls() -func StandardTypes() []*exprpb.Decl { - return stdlib.TypeExprDecls() +// StandardDeclarations returns the Decls for all functions and constants in the evaluator. 
+func StandardDeclarations() []*exprpb.Decl { + return standardDeclarations } diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go index e2373d1b7cb..28d21c9d929 100644 --- a/vendor/github.com/google/cel-go/checker/types.go +++ b/vendor/github.com/google/cel-go/checker/types.go @@ -15,54 +15,154 @@ package checker import ( - "github.com/google/cel-go/common/types" + "fmt" + "strings" + + "github.com/google/cel-go/checker/decls" + + "google.golang.org/protobuf/proto" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) +const ( + kindUnknown = iota + 1 + kindError + kindFunction + kindDyn + kindPrimitive + kindWellKnown + kindWrapper + kindNull + kindAbstract + kindType + kindList + kindMap + kindObject + kindTypeParam +) + +// FormatCheckedType converts a type message into a string representation. +func FormatCheckedType(t *exprpb.Type) string { + switch kindOf(t) { + case kindDyn: + return "dyn" + case kindFunction: + return formatFunction(t.GetFunction().GetResultType(), + t.GetFunction().GetArgTypes(), + false) + case kindList: + return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) + case kindObject: + return t.GetMessageType() + case kindMap: + return fmt.Sprintf("map(%s, %s)", + FormatCheckedType(t.GetMapType().GetKeyType()), + FormatCheckedType(t.GetMapType().GetValueType())) + case kindNull: + return "null" + case kindPrimitive: + switch t.GetPrimitive() { + case exprpb.Type_UINT64: + return "uint" + case exprpb.Type_INT64: + return "int" + } + return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") + case kindType: + if t.GetType() == nil { + return "type" + } + return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) + case kindWellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return "any" + case exprpb.Type_DURATION: + return "duration" + case exprpb.Type_TIMESTAMP: + return "timestamp" + } + case kindWrapper: + return fmt.Sprintf("wrapper(%s)", + FormatCheckedType(decls.NewPrimitiveType(t.GetWrapper()))) + case kindError: + return "!error!" + case kindTypeParam: + return t.GetTypeParam() + case kindAbstract: + at := t.GetAbstractType() + params := at.GetParameterTypes() + paramStrs := make([]string, len(params)) + for i, p := range params { + paramStrs[i] = FormatCheckedType(p) + } + return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) + } + return t.String() +} + // isDyn returns true if the input t is either type DYN or a well-known ANY message. -func isDyn(t *types.Type) bool { +func isDyn(t *exprpb.Type) bool { // Note: object type values that are well-known and map to a DYN value in practice // are sanitized prior to being added to the environment. - switch t.Kind() { - case types.DynKind, types.AnyKind: + switch kindOf(t) { + case kindDyn: return true + case kindWellKnown: + return t.GetWellKnown() == exprpb.Type_ANY default: return false } } // isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message. 
-func isDynOrError(t *types.Type) bool { +func isDynOrError(t *exprpb.Type) bool { return isError(t) || isDyn(t) } -func isError(t *types.Type) bool { - return t.Kind() == types.ErrorKind +func isError(t *exprpb.Type) bool { + return kindOf(t) == kindError } -func isOptional(t *types.Type) bool { - if t.Kind() == types.OpaqueKind { - return t.TypeName() == "optional" +func isOptional(t *exprpb.Type) bool { + if kindOf(t) == kindAbstract { + at := t.GetAbstractType() + return at.GetName() == "optional" } return false } -func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) { +func maybeUnwrapOptional(t *exprpb.Type) (*exprpb.Type, bool) { if isOptional(t) { - return t.Parameters()[0], true + at := t.GetAbstractType() + return at.GetParameterTypes()[0], true } return t, false } +func maybeUnwrapString(e *exprpb.Expr) (string, bool) { + switch e.GetExprKind().(type) { + case *exprpb.Expr_ConstExpr: + literal := e.GetConstExpr() + switch literal.GetConstantKind().(type) { + case *exprpb.Constant_StringValue: + return literal.GetStringValue(), true + } + } + return "", false +} + // isEqualOrLessSpecific checks whether one type is equal or less specific than the other one. // A type is less specific if it matches the other type using the DYN type. -func isEqualOrLessSpecific(t1, t2 *types.Type) bool { - kind1, kind2 := t1.Kind(), t2.Kind() +func isEqualOrLessSpecific(t1 *exprpb.Type, t2 *exprpb.Type) bool { + kind1, kind2 := kindOf(t1), kindOf(t2) // The first type is less specific. - if isDyn(t1) || kind1 == types.TypeParamKind { + if isDyn(t1) || kind1 == kindTypeParam { return true } // The first type is not less specific. - if isDyn(t2) || kind2 == types.TypeParamKind { + if isDyn(t2) || kind2 == kindTypeParam { return false } // Types must be of the same kind to be equal. @@ -73,34 +173,38 @@ func isEqualOrLessSpecific(t1, t2 *types.Type) bool { // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in // order to return true. switch kind1 { - case types.OpaqueKind: - if t1.TypeName() != t2.TypeName() || - len(t1.Parameters()) != len(t2.Parameters()) { + case kindAbstract: + a1 := t1.GetAbstractType() + a2 := t2.GetAbstractType() + if a1.GetName() != a2.GetName() || + len(a1.GetParameterTypes()) != len(a2.GetParameterTypes()) { return false } - for i, p1 := range t1.Parameters() { - if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) { + for i, p1 := range a1.GetParameterTypes() { + if !isEqualOrLessSpecific(p1, a2.GetParameterTypes()[i]) { return false } } return true - case types.ListKind: - return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) - case types.MapKind: - return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) && - isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1]) - case types.TypeKind: + case kindList: + return isEqualOrLessSpecific(t1.GetListType().GetElemType(), t2.GetListType().GetElemType()) + case kindMap: + m1 := t1.GetMapType() + m2 := t2.GetMapType() + return isEqualOrLessSpecific(m1.GetKeyType(), m2.GetKeyType()) && + isEqualOrLessSpecific(m1.GetValueType(), m2.GetValueType()) + case kindType: return true default: - return t1.IsExactType(t2) + return proto.Equal(t1, t2) } } // / internalIsAssignable returns true if t1 is assignable to t2. -func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { +func internalIsAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) bool { // Process type parameters. 
- kind1, kind2 := t1.Kind(), t2.Kind() - if kind2 == types.TypeParamKind { + kind1, kind2 := kindOf(t1), kindOf(t2) + if kind2 == kindTypeParam { // If t2 is a valid type substitution for t1, return true. valid, t2HasSub := isValidTypeSubstitution(m, t1, t2) if valid { @@ -113,7 +217,7 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { } // Otherwise, fall through to check whether t1 is a possible substitution for t2. } - if kind1 == types.TypeParamKind { + if kind1 == kindTypeParam { // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the // possible type substitutions have been searched in both directions. valid, _ := isValidTypeSubstitution(m, t2, t1) @@ -124,25 +228,40 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { if isDynOrError(t1) || isDynOrError(t2) { return true } - // Preserve the nullness checks of the legacy type-checker. - if kind1 == types.NullTypeKind { + + // Test for when the types do not need to agree, but are more specific than dyn. + switch kind1 { + case kindNull: return internalIsAssignableNull(t2) - } - if kind2 == types.NullTypeKind { - return internalIsAssignableNull(t1) + case kindPrimitive: + return internalIsAssignablePrimitive(t1.GetPrimitive(), t2) + case kindWrapper: + return internalIsAssignable(m, decls.NewPrimitiveType(t1.GetWrapper()), t2) + default: + if kind1 != kind2 { + return false + } } - // Test for when the types do not need to agree, but are more specific than dyn. + // Test for when the types must agree. switch kind1 { - case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind, - types.AnyKind, types.DurationKind, types.TimestampKind, - types.StructKind: - return t1.IsAssignableType(t2) - case types.TypeKind: - return kind2 == types.TypeKind - case types.OpaqueKind, types.ListKind, types.MapKind: - return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() && - internalIsAssignableList(m, t1.Parameters(), t2.Parameters()) + // ERROR, TYPE_PARAM, and DYN handled above. + case kindAbstract: + return internalIsAssignableAbstractType(m, t1.GetAbstractType(), t2.GetAbstractType()) + case kindFunction: + return internalIsAssignableFunction(m, t1.GetFunction(), t2.GetFunction()) + case kindList: + return internalIsAssignable(m, t1.GetListType().GetElemType(), t2.GetListType().GetElemType()) + case kindMap: + return internalIsAssignableMap(m, t1.GetMapType(), t2.GetMapType()) + case kindObject: + return t1.GetMessageType() == t2.GetMessageType() + case kindType: + // A type is a type is a type, any additional parameterization of the + // type cannot affect method resolution or assignability. + return true + case kindWellKnown: + return t1.GetWellKnown() == t2.GetWellKnown() default: return false } @@ -155,16 +274,16 @@ func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { // - t2 has a type substitution (t2sub) equal to t1 // - t2 has a type substitution (t2sub) assignable to t1 // - t2 does not occur within t1. -func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) { +func isValidTypeSubstitution(m *mapping, t1, t2 *exprpb.Type) (valid, hasSub bool) { // Early return if the t1 and t2 are the same instance. 
- kind1, kind2 := t1.Kind(), t2.Kind() - if kind1 == kind2 && t1.IsExactType(t2) { + kind1, kind2 := kindOf(t1), kindOf(t2) + if kind1 == kind2 && (t1 == t2 || proto.Equal(t1, t2)) { return true, true } if t2Sub, found := m.find(t2); found { // Early return if t1 and t2Sub are the same instance as otherwise the mapping // might mark a type as being a subtitution for itself. - if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) { + if kind1 == kindOf(t2Sub) && (t1 == t2Sub || proto.Equal(t1, t2Sub)) { return true, true } // If the types are compatible, pick the more general type and return true @@ -186,10 +305,28 @@ func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool return false, false } +// internalIsAssignableAbstractType returns true if the abstract type names agree and all type +// parameters are assignable. +func internalIsAssignableAbstractType(m *mapping, a1 *exprpb.Type_AbstractType, a2 *exprpb.Type_AbstractType) bool { + return a1.GetName() == a2.GetName() && + internalIsAssignableList(m, a1.GetParameterTypes(), a2.GetParameterTypes()) +} + +// internalIsAssignableFunction returns true if the function return type and arg types are +// assignable. +func internalIsAssignableFunction(m *mapping, f1 *exprpb.Type_FunctionType, f2 *exprpb.Type_FunctionType) bool { + f1ArgTypes := flattenFunctionTypes(f1) + f2ArgTypes := flattenFunctionTypes(f2) + if internalIsAssignableList(m, f1ArgTypes, f2ArgTypes) { + return true + } + return false +} + // internalIsAssignableList returns true if the element types at each index in the list are // assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be // assignable. -func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool { +func internalIsAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) bool { if len(l1) != len(l2) { return false } @@ -201,22 +338,41 @@ func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool { return true } -// internalIsAssignableNull returns true if the type is nullable. -func internalIsAssignableNull(t *types.Type) bool { - return isLegacyNullable(t) || t.IsAssignableType(types.NullType) +// internalIsAssignableMap returns true if map m1 may be assigned to map m2. +func internalIsAssignableMap(m *mapping, m1 *exprpb.Type_MapType, m2 *exprpb.Type_MapType) bool { + if internalIsAssignableList(m, + []*exprpb.Type{m1.GetKeyType(), m1.GetValueType()}, + []*exprpb.Type{m2.GetKeyType(), m2.GetValueType()}) { + return true + } + return false } -// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation. -func isLegacyNullable(t *types.Type) bool { - switch t.Kind() { - case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind: +// internalIsAssignableNull returns true if the type is nullable. +func internalIsAssignableNull(t *exprpb.Type) bool { + switch kindOf(t) { + case kindAbstract, kindObject, kindNull, kindWellKnown, kindWrapper: return true + default: + return false + } +} + +// internalIsAssignablePrimitive returns true if the target type is the same or if it is a wrapper +// for the primitive type. 
+func internalIsAssignablePrimitive(p exprpb.Type_PrimitiveType, target *exprpb.Type) bool { + switch kindOf(target) { + case kindPrimitive: + return p == target.GetPrimitive() + case kindWrapper: + return p == target.GetWrapper() + default: + return false } - return false } // isAssignable returns an updated type substitution mapping if t1 is assignable to t2. -func isAssignable(m *mapping, t1, t2 *types.Type) *mapping { +func isAssignable(m *mapping, t1 *exprpb.Type, t2 *exprpb.Type) *mapping { mCopy := m.copy() if internalIsAssignable(mCopy, t1, t2) { return mCopy @@ -225,7 +381,7 @@ func isAssignable(m *mapping, t1, t2 *types.Type) *mapping { } // isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. -func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping { +func isAssignableList(m *mapping, l1 []*exprpb.Type, l2 []*exprpb.Type) *mapping { mCopy := m.copy() if internalIsAssignableList(mCopy, l1, l2) { return mCopy @@ -233,8 +389,44 @@ func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping { return nil } +// kindOf returns the kind of the type as defined in the checked.proto. +func kindOf(t *exprpb.Type) int { + if t == nil || t.TypeKind == nil { + return kindUnknown + } + switch t.GetTypeKind().(type) { + case *exprpb.Type_Error: + return kindError + case *exprpb.Type_Function: + return kindFunction + case *exprpb.Type_Dyn: + return kindDyn + case *exprpb.Type_Primitive: + return kindPrimitive + case *exprpb.Type_WellKnown: + return kindWellKnown + case *exprpb.Type_Wrapper: + return kindWrapper + case *exprpb.Type_Null: + return kindNull + case *exprpb.Type_Type: + return kindType + case *exprpb.Type_ListType_: + return kindList + case *exprpb.Type_MapType_: + return kindMap + case *exprpb.Type_MessageType: + return kindObject + case *exprpb.Type_TypeParam: + return kindTypeParam + case *exprpb.Type_AbstractType_: + return kindAbstract + } + return kindUnknown +} + // mostGeneral returns the more general of two types which are known to unify. -func mostGeneral(t1, t2 *types.Type) *types.Type { +func mostGeneral(t1 *exprpb.Type, t2 *exprpb.Type) *exprpb.Type { if isEqualOrLessSpecific(t1, t2) { return t1 } @@ -244,25 +436,32 @@ func mostGeneral(t1, t2 *types.Type) *types.Type { // notReferencedIn checks whether the type doesn't appear directly or transitively within the other // type. This is a standard requirement for type unification, commonly referred to as the "occurs // check". 
-func notReferencedIn(m *mapping, t, withinType *types.Type) bool { - if t.IsExactType(withinType) { +func notReferencedIn(m *mapping, t *exprpb.Type, withinType *exprpb.Type) bool { + if proto.Equal(t, withinType) { return false } - withinKind := withinType.Kind() + withinKind := kindOf(withinType) switch withinKind { - case types.TypeParamKind: + case kindTypeParam: wtSub, found := m.find(withinType) if !found { return true } return notReferencedIn(m, t, wtSub) - case types.OpaqueKind, types.ListKind, types.MapKind: - for _, pt := range withinType.Parameters() { + case kindAbstract: + for _, pt := range withinType.GetAbstractType().GetParameterTypes() { if !notReferencedIn(m, t, pt) { return false } } return true + case kindList: + return notReferencedIn(m, t, withinType.GetListType().GetElemType()) + case kindMap: + mt := withinType.GetMapType() + return notReferencedIn(m, t, mt.GetKeyType()) && notReferencedIn(m, t, mt.GetValueType()) + case kindWrapper: + return notReferencedIn(m, t, decls.NewPrimitiveType(withinType.GetWrapper())) default: return true } @@ -270,25 +469,39 @@ func notReferencedIn(m *mapping, t, withinType *types.Type) bool { // substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type // parameters are replaced by DYN if typeParamToDyn is true. -func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { +func substitute(m *mapping, t *exprpb.Type, typeParamToDyn bool) *exprpb.Type { if tSub, found := m.find(t); found { return substitute(m, tSub, typeParamToDyn) } - kind := t.Kind() - if typeParamToDyn && kind == types.TypeParamKind { - return types.DynType + kind := kindOf(t) + if typeParamToDyn && kind == kindTypeParam { + return decls.Dyn } switch kind { - case types.OpaqueKind: - return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...) - case types.ListKind: - return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn)) - case types.MapKind: - return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn), - substitute(m, t.Parameters()[1], typeParamToDyn)) - case types.TypeKind: - if len(t.Parameters()) > 0 { - return types.NewTypeTypeWithParam(substitute(m, t.Parameters()[0], typeParamToDyn)) + case kindAbstract: + at := t.GetAbstractType() + params := make([]*exprpb.Type, len(at.GetParameterTypes())) + for i, p := range at.GetParameterTypes() { + params[i] = substitute(m, p, typeParamToDyn) + } + return decls.NewAbstractType(at.GetName(), params...) + case kindFunction: + fn := t.GetFunction() + rt := substitute(m, fn.ResultType, typeParamToDyn) + args := make([]*exprpb.Type, len(fn.GetArgTypes())) + for i, a := range fn.ArgTypes { + args[i] = substitute(m, a, typeParamToDyn) + } + return decls.NewFunctionType(rt, args...) 
+ case kindList: + return decls.NewListType(substitute(m, t.GetListType().GetElemType(), typeParamToDyn)) + case kindMap: + mt := t.GetMapType() + return decls.NewMapType(substitute(m, mt.GetKeyType(), typeParamToDyn), + substitute(m, mt.GetValueType(), typeParamToDyn)) + case kindType: + if t.GetType() != nil { + return decls.NewTypeType(substitute(m, t.GetType(), typeParamToDyn)) } return t default: @@ -296,14 +509,21 @@ func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { } } -func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type { - subParams := make([]*types.Type, len(typeParams)) - for i, tp := range typeParams { - subParams[i] = substitute(m, tp, typeParamToDyn) - } - return subParams +func typeKey(t *exprpb.Type) string { + return FormatCheckedType(t) } -func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type { - return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...) +// flattenFunctionTypes takes a function with arg types T1, T2, ..., TN and result type TR +// and returns a slice containing {T1, T2, ..., TN, TR}. +func flattenFunctionTypes(f *exprpb.Type_FunctionType) []*exprpb.Type { + argTypes := f.GetArgTypes() + if len(argTypes) == 0 { + return []*exprpb.Type{f.GetResultType()} + } + flattend := make([]*exprpb.Type, len(argTypes)+1, len(argTypes)+1) + for i, at := range argTypes { + flattend[i] = at + } + flattend[len(argTypes)] = f.GetResultType() + return flattend } diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel deleted file mode 100644 index 7269cdff5f7..00000000000 --- a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel +++ /dev/null @@ -1,52 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = [ - "//cel:__subpackages__", - "//checker:__subpackages__", - "//common:__subpackages__", - "//interpreter:__subpackages__", - ], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "ast.go", - "expr.go", - ], - importpath = "github.com/google/cel-go/common/ast", - deps = [ - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//types/known/structpb:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "ast_test.go", - "expr_test.go", - ], - embed = [ - ":go_default_library", - ], - deps = [ - "//checker:go_default_library", - "//checker/decls:go_default_library", - "//common:go_default_library", - "//common/containers:go_default_library", - "//common/decls:go_default_library", - "//common/overloads:go_default_library", - "//common/stdlib:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//parser:go_default_library", - "//test/proto3pb:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - ], -) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go deleted file mode 100644 index b3c150793a9..00000000000 --- a/vendor/github.com/google/cel-go/common/ast/ast.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, 
Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ast declares data structures useful for parsed and checked abstract syntax trees -package ast - -import ( - "fmt" - - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - - structpb "google.golang.org/protobuf/types/known/structpb" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// CheckedAST contains a protobuf expression and source info along with CEL-native type and reference information. -type CheckedAST struct { - Expr *exprpb.Expr - SourceInfo *exprpb.SourceInfo - TypeMap map[int64]*types.Type - ReferenceMap map[int64]*ReferenceInfo -} - -// CheckedASTToCheckedExpr converts a CheckedAST to a CheckedExpr protobouf. -func CheckedASTToCheckedExpr(ast *CheckedAST) (*exprpb.CheckedExpr, error) { - refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap)) - for id, ref := range ast.ReferenceMap { - r, err := ReferenceInfoToReferenceExpr(ref) - if err != nil { - return nil, err - } - refMap[id] = r - } - typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap)) - for id, typ := range ast.TypeMap { - t, err := types.TypeToExprType(typ) - if err != nil { - return nil, err - } - typeMap[id] = t - } - return &exprpb.CheckedExpr{ - Expr: ast.Expr, - SourceInfo: ast.SourceInfo, - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil -} - -// CheckedExprToCheckedAST converts a CheckedExpr protobuf to a CheckedAST instance. -func CheckedExprToCheckedAST(checked *exprpb.CheckedExpr) (*CheckedAST, error) { - refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) - for id, ref := range checked.GetReferenceMap() { - r, err := ReferenceExprToReferenceInfo(ref) - if err != nil { - return nil, err - } - refMap[id] = r - } - typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) - for id, typ := range checked.GetTypeMap() { - t, err := types.ExprTypeToType(typ) - if err != nil { - return nil, err - } - typeMap[id] = t - } - return &CheckedAST{ - Expr: checked.GetExpr(), - SourceInfo: checked.GetSourceInfo(), - ReferenceMap: refMap, - TypeMap: typeMap, - }, nil -} - -// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to -// either a qualified identifier name, a set of overload ids, or a constant value from an enum. -type ReferenceInfo struct { - Name string - OverloadIDs []string - Value ref.Val -} - -// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value. -func NewIdentReference(name string, value ref.Val) *ReferenceInfo { - return &ReferenceInfo{Name: name, Value: value} -} - -// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads. -func NewFunctionReference(overloads ...string) *ReferenceInfo { - info := &ReferenceInfo{} - for _, id := range overloads { - info.AddOverload(id) - } - return info -} - -// AddOverload appends a function overload ID to the ReferenceInfo. 
-func (r *ReferenceInfo) AddOverload(overloadID string) { - for _, id := range r.OverloadIDs { - if id == overloadID { - return - } - } - r.OverloadIDs = append(r.OverloadIDs, overloadID) -} - -// Equals returns whether two references are identical to each other. -func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { - if r.Name != other.Name { - return false - } - if len(r.OverloadIDs) != len(other.OverloadIDs) { - return false - } - if len(r.OverloadIDs) != 0 { - overloadMap := make(map[string]struct{}, len(r.OverloadIDs)) - for _, id := range r.OverloadIDs { - overloadMap[id] = struct{}{} - } - for _, id := range other.OverloadIDs { - _, found := overloadMap[id] - if !found { - return false - } - } - } - if r.Value == nil && other.Value == nil { - return true - } - if r.Value == nil && other.Value != nil || - r.Value != nil && other.Value == nil || - r.Value.Equal(other.Value) != types.True { - return false - } - return true -} - -// ReferenceInfoToReferenceExpr converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. -func ReferenceInfoToReferenceExpr(info *ReferenceInfo) (*exprpb.Reference, error) { - c, err := ValToConstant(info.Value) - if err != nil { - return nil, err - } - return &exprpb.Reference{ - Name: info.Name, - OverloadId: info.OverloadIDs, - Value: c, - }, nil -} - -// ReferenceExprToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. -func ReferenceExprToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { - v, err := ConstantToVal(ref.GetValue()) - if err != nil { - return nil, err - } - return &ReferenceInfo{ - Name: ref.GetName(), - OverloadIDs: ref.GetOverloadId(), - Value: v, - }, nil -} - -// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. -// -// Only simple scalar types are supported by this method. -func ValToConstant(v ref.Val) (*exprpb.Constant, error) { - if v == nil { - return nil, nil - } - switch v.Type() { - case types.BoolType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil - case types.BytesType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil - case types.DoubleType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil - case types.IntType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil - case types.NullType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil - case types.StringType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil - case types.UintType: - return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) -} - -// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. 
-func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { - if c == nil { - return nil, nil - } - switch c.GetConstantKind().(type) { - case *exprpb.Constant_BoolValue: - return types.Bool(c.GetBoolValue()), nil - case *exprpb.Constant_BytesValue: - return types.Bytes(c.GetBytesValue()), nil - case *exprpb.Constant_DoubleValue: - return types.Double(c.GetDoubleValue()), nil - case *exprpb.Constant_Int64Value: - return types.Int(c.GetInt64Value()), nil - case *exprpb.Constant_NullValue: - return types.NullValue, nil - case *exprpb.Constant_StringValue: - return types.String(c.GetStringValue()), nil - case *exprpb.Constant_Uint64Value: - return types.Uint(c.GetUint64Value()), nil - } - return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) -} diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go deleted file mode 100644 index b63884a6028..00000000000 --- a/vendor/github.com/google/cel-go/common/ast/expr.go +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ast - -import ( - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// ExprKind represents the expression node kind. -type ExprKind int - -const ( - // UnspecifiedKind represents an unset expression with no specified properties. - UnspecifiedKind ExprKind = iota - - // LiteralKind represents a primitive scalar literal. - LiteralKind - - // IdentKind represents a simple variable, constant, or type identifier. - IdentKind - - // SelectKind represents a field selection expression. - SelectKind - - // CallKind represents a function call. - CallKind - - // ListKind represents a list literal expression. - ListKind - - // MapKind represents a map literal expression. - MapKind - - // StructKind represents a struct literal expression. - StructKind - - // ComprehensionKind represents a comprehension expression generated by a macro. - ComprehensionKind -) - -// NavigateCheckedAST converts a CheckedAST to a NavigableExpr -func NavigateCheckedAST(ast *CheckedAST) NavigableExpr { - return newNavigableExpr(nil, ast.Expr, ast.TypeMap) -} - -// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match. -// -// This function type should be use with the `Match` and `MatchList` calls. -type ExprMatcher func(NavigableExpr) bool - -// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr -// is comprised of all constant values, such as a simple literal or even list and map literal. -func ConstantValueMatcher() ExprMatcher { - return matchIsConstantValue -} - -// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches -// the specified `kind`. 
-func KindMatcher(kind ExprKind) ExprMatcher { - return func(e NavigableExpr) bool { - return e.Kind() == kind - } -} - -// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose -// function name is equal to `funcName`. -func FunctionMatcher(funcName string) ExprMatcher { - return func(e NavigableExpr) bool { - if e.Kind() != CallKind { - return false - } - return e.AsCall().FunctionName() == funcName - } -} - -// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. -// -// Such a result would work well with subsequent MatchList calls. -func AllMatcher() ExprMatcher { - return func(NavigableExpr) bool { - return true - } -} - -// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values of the -// descendants which match. -func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { - return matchListInternal([]NavigableExpr{expr}, matcher, true) -} - -// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a -// subset of NavigableExpr values which match. -func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { - visit := make([]NavigableExpr, len(exprs)) - copy(visit, exprs) - return matchListInternal(visit, matcher, false) -} - -func matchListInternal(visit []NavigableExpr, matcher ExprMatcher, visitDescendants bool) []NavigableExpr { - var matched []NavigableExpr - for len(visit) != 0 { - e := visit[0] - if matcher(e) { - matched = append(matched, e) - } - if visitDescendants { - visit = append(visit[1:], e.Children()...) - } else { - visit = visit[1:] - } - } - return matched -} - -func matchIsConstantValue(e NavigableExpr) bool { - if e.Kind() == LiteralKind { - return true - } - if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { - for _, child := range e.Children() { - if !matchIsConstantValue(child) { - return false - } - } - return true - } - return false -} - -// NavigableExpr represents the base navigable expression value. -// -// Depending on the `Kind()` value, the NavigableExpr may be converted to a concrete expression types -// as indicated by the `As` methods. -// -// NavigableExpr values and their concrete expression types should be nil-safe. Conversion of an expr -// to the wrong kind should produce a nil value. -type NavigableExpr interface { - // ID of the expression as it appears in the AST - ID() int64 - - // Kind of the expression node. See ExprKind for the valid enum values. - Kind() ExprKind - - // Type of the expression node. - Type() *types.Type - - // Parent returns the parent expression node, if one exists. - Parent() (NavigableExpr, bool) - - // Children returns a list of child expression nodes. - Children() []NavigableExpr - - // ToExpr adapts this NavigableExpr to a protobuf representation. - ToExpr() *exprpb.Expr - - // AsCall adapts the expr into a NavigableCallExpr - // - // The Kind() must be equal to a CallKind for the conversion to be well-defined. - AsCall() NavigableCallExpr - - // AsComprehension adapts the expr into a NavigableComprehensionExpr. - // - // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined. - AsComprehension() NavigableComprehensionExpr - - // AsIdent adapts the expr into an identifier string. - // - // The Kind() must be equal to an IdentKind for the conversion to be well-defined. 
- AsIdent() string - - // AsLiteral adapts the expr into a constant ref.Val. - // - // The Kind() must be equal to a LiteralKind for the conversion to be well-defined. - AsLiteral() ref.Val - - // AsList adapts the expr into a NavigableListExpr. - // - // The Kind() must be equal to a ListKind for the conversion to be well-defined. - AsList() NavigableListExpr - - // AsMap adapts the expr into a NavigableMapExpr. - // - // The Kind() must be equal to a MapKind for the conversion to be well-defined. - AsMap() NavigableMapExpr - - // AsSelect adapts the expr into a NavigableSelectExpr. - // - // The Kind() must be equal to a SelectKind for the conversion to be well-defined. - AsSelect() NavigableSelectExpr - - // AsStruct adapts the expr into a NavigableStructExpr. - // - // The Kind() must be equal to a StructKind for the conversion to be well-defined. - AsStruct() NavigableStructExpr - - // marker interface method - isNavigable() -} - -// NavigableCallExpr defines an interface for inspecting a function call and its arugments. -type NavigableCallExpr interface { - // FunctionName returns the name of the function. - FunctionName() string - - // Target returns the target of the expression if one is present. - Target() NavigableExpr - - // Args returns the list of call arguments, excluding the target. - Args() []NavigableExpr - - // ReturnType returns the result type of the call. - ReturnType() *types.Type - - // marker interface method - isNavigable() -} - -// NavigableListExpr defines an interface for inspecting a list literal expression. -type NavigableListExpr interface { - // Elements returns the list elements as navigable expressions. - Elements() []NavigableExpr - - // OptionalIndicies returns the list of optional indices in the list literal. - OptionalIndices() []int32 - - // Size returns the number of elements in the list. - Size() int - - // marker interface method - isNavigable() -} - -// NavigableSelectExpr defines an interface for inspecting a select expression. -type NavigableSelectExpr interface { - // Operand returns the selection operand expression. - Operand() NavigableExpr - - // FieldName returns the field name being selected from the operand. - FieldName() string - - // IsTestOnly indicates whether the select expression is a presence test generated by a macro. - IsTestOnly() bool - - // marker interface method - isNavigable() -} - -// NavigableMapExpr defines an interface for inspecting a map expression. -type NavigableMapExpr interface { - // Entries returns the map key value pairs as NavigableEntry values. - Entries() []NavigableEntry - - // Size returns the number of entries in the map. - Size() int - - // marker interface method - isNavigable() -} - -// NavigableEntry defines an interface for inspecting a map entry. -type NavigableEntry interface { - // Key returns the map entry key expression. - Key() NavigableExpr - - // Value returns the map entry value expression. - Value() NavigableExpr - - // IsOptional returns whether the entry is optional. - IsOptional() bool - - // marker interface method - isNavigable() -} - -// NavigableStructExpr defines an interfaces for inspecting a struct and its field initializers. -type NavigableStructExpr interface { - // TypeName returns the struct type name. - TypeName() string - - // Fields returns the set of field initializers in the struct expression as NavigableField values. 
- Fields() []NavigableField - - // marker interface method - isNavigable() -} - -// NavigableField defines an interface for inspecting a struct field initialization. -type NavigableField interface { - // FieldName returns the name of the field. - FieldName() string - - // Value returns the field initialization expression. - Value() NavigableExpr - - // IsOptional returns whether the field is optional. - IsOptional() bool - - // marker interface method - isNavigable() -} - -// NavigableComprehensionExpr defines an interface for inspecting a comprehension expression. -type NavigableComprehensionExpr interface { - // IterRange returns the iteration range expression. - IterRange() NavigableExpr - - // IterVar returns the iteration variable name. - IterVar() string - - // AccuVar returns the accumulation variable name. - AccuVar() string - - // AccuInit returns the accumulation variable initialization expression. - AccuInit() NavigableExpr - - // LoopCondition returns the loop condition expression. - LoopCondition() NavigableExpr - - // LoopStep returns the loop step expression. - LoopStep() NavigableExpr - - // Result returns the comprehension result expression. - Result() NavigableExpr - - // marker interface method - isNavigable() -} - -func newNavigableExpr(parent NavigableExpr, expr *exprpb.Expr, typeMap map[int64]*types.Type) NavigableExpr { - kind, factory := kindOf(expr) - nav := &navigableExprImpl{ - parent: parent, - kind: kind, - expr: expr, - typeMap: typeMap, - createChildren: factory, - } - return nav -} - -type navigableExprImpl struct { - parent NavigableExpr - kind ExprKind - expr *exprpb.Expr - typeMap map[int64]*types.Type - createChildren childFactory -} - -func (nav *navigableExprImpl) ID() int64 { - return nav.ToExpr().GetId() -} - -func (nav *navigableExprImpl) Kind() ExprKind { - return nav.kind -} - -func (nav *navigableExprImpl) Type() *types.Type { - if t, found := nav.typeMap[nav.ID()]; found { - return t - } - return types.DynType -} - -func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { - if nav.parent != nil { - return nav.parent, true - } - return nil, false -} - -func (nav *navigableExprImpl) Children() []NavigableExpr { - return nav.createChildren(nav) -} - -func (nav *navigableExprImpl) ToExpr() *exprpb.Expr { - return nav.expr -} - -func (nav *navigableExprImpl) AsCall() NavigableCallExpr { - return navigableCallImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) AsComprehension() NavigableComprehensionExpr { - return navigableComprehensionImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) AsIdent() string { - return nav.ToExpr().GetIdentExpr().GetName() -} - -func (nav *navigableExprImpl) AsLiteral() ref.Val { - if nav.Kind() != LiteralKind { - return nil - } - val, err := ConstantToVal(nav.ToExpr().GetConstExpr()) - if err != nil { - panic(err) - } - return val -} - -func (nav *navigableExprImpl) AsList() NavigableListExpr { - return navigableListImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) AsMap() NavigableMapExpr { - return navigableMapImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) AsSelect() NavigableSelectExpr { - return navigableSelectImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) AsStruct() NavigableStructExpr { - return navigableStructImpl{navigableExprImpl: nav} -} - -func (nav *navigableExprImpl) createChild(e *exprpb.Expr) NavigableExpr { - return newNavigableExpr(nav, e, nav.typeMap) -} - -func (nav *navigableExprImpl) isNavigable() {} - -type 
navigableCallImpl struct { - *navigableExprImpl -} - -func (call navigableCallImpl) FunctionName() string { - return call.ToExpr().GetCallExpr().GetFunction() -} - -func (call navigableCallImpl) Target() NavigableExpr { - t := call.ToExpr().GetCallExpr().GetTarget() - if t != nil { - return call.createChild(t) - } - return nil -} - -func (call navigableCallImpl) Args() []NavigableExpr { - args := call.ToExpr().GetCallExpr().GetArgs() - navArgs := make([]NavigableExpr, len(args)) - for i, a := range args { - navArgs[i] = call.createChild(a) - } - return navArgs -} - -func (call navigableCallImpl) ReturnType() *types.Type { - return call.Type() -} - -type navigableComprehensionImpl struct { - *navigableExprImpl -} - -func (comp navigableComprehensionImpl) IterRange() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetIterRange()) -} - -func (comp navigableComprehensionImpl) IterVar() string { - return comp.ToExpr().GetComprehensionExpr().GetIterVar() -} - -func (comp navigableComprehensionImpl) AccuVar() string { - return comp.ToExpr().GetComprehensionExpr().GetAccuVar() -} - -func (comp navigableComprehensionImpl) AccuInit() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetAccuInit()) -} - -func (comp navigableComprehensionImpl) LoopCondition() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopCondition()) -} - -func (comp navigableComprehensionImpl) LoopStep() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetLoopStep()) -} - -func (comp navigableComprehensionImpl) Result() NavigableExpr { - return comp.createChild(comp.ToExpr().GetComprehensionExpr().GetResult()) -} - -type navigableListImpl struct { - *navigableExprImpl -} - -func (l navigableListImpl) Elements() []NavigableExpr { - return l.Children() -} - -func (l navigableListImpl) OptionalIndices() []int32 { - return l.ToExpr().GetListExpr().GetOptionalIndices() -} - -func (l navigableListImpl) Size() int { - return len(l.ToExpr().GetListExpr().GetElements()) -} - -type navigableMapImpl struct { - *navigableExprImpl -} - -func (m navigableMapImpl) Entries() []NavigableEntry { - mapExpr := m.ToExpr().GetStructExpr() - entries := make([]NavigableEntry, len(mapExpr.GetEntries())) - for i, e := range mapExpr.GetEntries() { - entries[i] = navigableEntryImpl{ - key: m.createChild(e.GetMapKey()), - val: m.createChild(e.GetValue()), - isOpt: e.GetOptionalEntry(), - } - } - return entries -} - -func (m navigableMapImpl) Size() int { - return len(m.ToExpr().GetStructExpr().GetEntries()) -} - -type navigableEntryImpl struct { - key NavigableExpr - val NavigableExpr - isOpt bool -} - -func (e navigableEntryImpl) Key() NavigableExpr { - return e.key -} - -func (e navigableEntryImpl) Value() NavigableExpr { - return e.val -} - -func (e navigableEntryImpl) IsOptional() bool { - return e.isOpt -} - -func (e navigableEntryImpl) isNavigable() {} - -type navigableSelectImpl struct { - *navigableExprImpl -} - -func (sel navigableSelectImpl) FieldName() string { - return sel.ToExpr().GetSelectExpr().GetField() -} - -func (sel navigableSelectImpl) IsTestOnly() bool { - return sel.ToExpr().GetSelectExpr().GetTestOnly() -} - -func (sel navigableSelectImpl) Operand() NavigableExpr { - return sel.createChild(sel.ToExpr().GetSelectExpr().GetOperand()) -} - -type navigableStructImpl struct { - *navigableExprImpl -} - -func (s navigableStructImpl) TypeName() string { - return s.ToExpr().GetStructExpr().GetMessageName() -} - -func (s 
navigableStructImpl) Fields() []NavigableField { - fieldInits := s.ToExpr().GetStructExpr().GetEntries() - fields := make([]NavigableField, len(fieldInits)) - for i, f := range fieldInits { - fields[i] = navigableFieldImpl{ - name: f.GetFieldKey(), - val: s.createChild(f.GetValue()), - isOpt: f.GetOptionalEntry(), - } - } - return fields -} - -type navigableFieldImpl struct { - name string - val NavigableExpr - isOpt bool -} - -func (f navigableFieldImpl) FieldName() string { - return f.name -} - -func (f navigableFieldImpl) Value() NavigableExpr { - return f.val -} - -func (f navigableFieldImpl) IsOptional() bool { - return f.isOpt -} - -func (f navigableFieldImpl) isNavigable() {} - -func kindOf(expr *exprpb.Expr) (ExprKind, childFactory) { - switch expr.GetExprKind().(type) { - case *exprpb.Expr_ConstExpr: - return LiteralKind, noopFactory - case *exprpb.Expr_IdentExpr: - return IdentKind, noopFactory - case *exprpb.Expr_SelectExpr: - return SelectKind, selectFactory - case *exprpb.Expr_CallExpr: - return CallKind, callArgFactory - case *exprpb.Expr_ListExpr: - return ListKind, listElemFactory - case *exprpb.Expr_StructExpr: - if expr.GetStructExpr().GetMessageName() != "" { - return StructKind, structEntryFactory - } - return MapKind, mapEntryFactory - case *exprpb.Expr_ComprehensionExpr: - return ComprehensionKind, comprehensionFactory - default: - return UnspecifiedKind, noopFactory - } -} - -type childFactory func(*navigableExprImpl) []NavigableExpr - -func noopFactory(*navigableExprImpl) []NavigableExpr { - return nil -} - -func selectFactory(nav *navigableExprImpl) []NavigableExpr { - return []NavigableExpr{ - nav.createChild(nav.ToExpr().GetSelectExpr().GetOperand()), - } -} - -func callArgFactory(nav *navigableExprImpl) []NavigableExpr { - call := nav.ToExpr().GetCallExpr() - argCount := len(call.GetArgs()) - if call.GetTarget() != nil { - argCount++ - } - navExprs := make([]NavigableExpr, argCount) - i := 0 - if call.GetTarget() != nil { - navExprs[i] = nav.createChild(call.GetTarget()) - i++ - } - for _, arg := range call.GetArgs() { - navExprs[i] = nav.createChild(arg) - i++ - } - return navExprs -} - -func listElemFactory(nav *navigableExprImpl) []NavigableExpr { - l := nav.ToExpr().GetListExpr() - navExprs := make([]NavigableExpr, len(l.GetElements())) - for i, e := range l.GetElements() { - navExprs[i] = nav.createChild(e) - } - return navExprs -} - -func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())) - for i, e := range s.GetEntries() { - - entries[i] = nav.createChild(e.GetValue()) - } - return entries -} - -func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { - s := nav.ToExpr().GetStructExpr() - entries := make([]NavigableExpr, len(s.GetEntries())*2) - j := 0 - for _, e := range s.GetEntries() { - entries[j] = nav.createChild(e.GetMapKey()) - entries[j+1] = nav.createChild(e.GetValue()) - j += 2 - } - return entries -} - -func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { - compre := nav.ToExpr().GetComprehensionExpr() - return []NavigableExpr{ - nav.createChild(compre.GetIterRange()), - nav.createChild(compre.GetAccuInit()), - nav.createChild(compre.GetLoopCondition()), - nav.createChild(compre.GetLoopStep()), - nav.createChild(compre.GetResult()), - } -} diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel deleted file mode 100644 index 17791dce6a0..00000000000 
--- a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel +++ /dev/null @@ -1,39 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "decls.go", - ], - importpath = "github.com/google/cel-go/common/decls", - deps = [ - "//checker/decls:go_default_library", - "//common/functions:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "decls_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//checker/decls:go_default_library", - "//common/overloads:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - "@org_golang_google_protobuf//proto:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go deleted file mode 100644 index 734ebe57e52..00000000000 --- a/vendor/github.com/google/cel-go/common/decls/decls.go +++ /dev/null @@ -1,844 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package decls contains function and variable declaration structs and helper methods. -package decls - -import ( - "fmt" - "strings" - - chkdecls "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/functions" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// NewFunction creates a new function declaration with a set of function options to configure overloads -// and function definitions (implementations). -// -// Functions are checked for name collisions and singleton redefinition. -func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) { - fn := &FunctionDecl{ - name: name, - overloads: map[string]*OverloadDecl{}, - overloadOrdinals: []string{}, - } - var err error - for _, opt := range opts { - fn, err = opt(fn) - if err != nil { - return nil, err - } - } - if len(fn.overloads) == 0 { - return nil, fmt.Errorf("function %s must have at least one overload", name) - } - return fn, nil -} - -// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all -// overload instances. -type FunctionDecl struct { - name string - - // overloads associated with the function name. - overloads map[string]*OverloadDecl - - // singleton implementation of the function for all overloads. 
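
Illustrative sketch (not part of this diff) of the decls API deleted here: NewFunction composes a declaration from Overload and binding options. The function name, overload id, and behavior below are invented for the example.

package example

import (
	"strings"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

// newShoutFunction declares a hypothetical shout(string, int) -> string
// function with a single overload and a binary binding.
func newShoutFunction() (*decls.FunctionDecl, error) {
	return decls.NewFunction("shout",
		decls.Overload("shout_string_int",
			[]*types.Type{types.StringType, types.IntType}, types.StringType,
			decls.BinaryBinding(func(msg, n ref.Val) ref.Val {
				s, sOK := msg.(types.String)
				times, nOK := n.(types.Int)
				if !sOK || !nOK {
					return decls.MaybeNoSuchOverload("shout", msg, n)
				}
				if times < 0 {
					return types.NewErr("shout: repeat count must be non-negative")
				}
				return types.String(strings.TrimSpace(
					strings.Repeat(string(s)+"! ", int(times))))
			})))
}
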
- // - // If this option is set, an error will occur if any overloads specify a per-overload implementation - // or if another function with the same name attempts to redefine the singleton. - singleton *functions.Overload - - // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could - // add overhead on common operations. Setting this option true leaves error checks and argument checks - // intact. - disableTypeGuards bool - - // state indicates that the binding should be provided as a declaration, as a runtime binding, or both. - state declarationState - - // overloadOrdinals indicates the order in which the overload was declared. - overloadOrdinals []string -} - -type declarationState int - -const ( - declarationStateUnset declarationState = iota - declarationDisabled - declarationEnabled -) - -// Name returns the function name in human-readable terms, e.g. 'contains' of 'math.least' -func (f *FunctionDecl) Name() string { - if f == nil { - return "" - } - return f.name -} - -// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the -// declaration should not be exposed for use in expressions. -func (f *FunctionDecl) IsDeclarationDisabled() bool { - return f.state == declarationDisabled -} - -// Merge combines an existing function declaration with another. -// -// If a function is extended, by say adding new overloads to an existing function, then it is merged with the -// prior definition of the function at which point its overloads must not collide with pre-existing overloads -// and its bindings (singleton, or per-overload) must not conflict with previous definitions either. -func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) { - if f == other { - return f, nil - } - if f.Name() != other.Name() { - return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name()) - } - merged := &FunctionDecl{ - name: f.Name(), - overloads: make(map[string]*OverloadDecl, len(f.overloads)), - singleton: f.singleton, - overloadOrdinals: make([]string, len(f.overloads)), - // if one function is expecting type-guards and the other is not, then they - // must not be disabled. - disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards, - // default to the current functions declaration state. - state: f.state, - } - // If the other state indicates that the declaration should be explicitly enabled or - // disabled, then update the merged state with the most recent value. 
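
A hedged example of Merge, reusing the imports from the previous sketch; the function name and overload ids are invented. Two declarations of the same name with non-colliding overloads fold into a single declaration carrying both.

// mergeGreet extends a base "greet" declaration with an additional overload.
func mergeGreet() (*decls.FunctionDecl, error) {
	base, err := decls.NewFunction("greet",
		decls.Overload("greet_string", []*types.Type{types.StringType}, types.StringType))
	if err != nil {
		return nil, err
	}
	ext, err := decls.NewFunction("greet",
		decls.Overload("greet_int", []*types.Type{types.IntType}, types.StringType))
	if err != nil {
		return nil, err
	}
	// Overload ids and signatures do not collide, so Merge succeeds; a duplicate
	// id, an overlapping signature, or a conflicting singleton would error instead.
	return base.Merge(ext)
}
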
- if other.state != declarationStateUnset { - merged.state = other.state - } - // baseline copy of the overloads and their ordinals - copy(merged.overloadOrdinals, f.overloadOrdinals) - for oID, o := range f.overloads { - merged.overloads[oID] = o - } - // overloads and their ordinals are added from the left - for _, oID := range other.overloadOrdinals { - o := other.overloads[oID] - err := merged.AddOverload(o) - if err != nil { - return nil, fmt.Errorf("function declaration merge failed: %v", err) - } - } - if other.singleton != nil { - if merged.singleton != nil && merged.singleton != other.singleton { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) - } - merged.singleton = other.singleton - } - return merged, nil -} - -// AddOverload ensures that the new overload does not collide with an existing overload signature; -// however, if the function signatures are identical, the implementation may be rewritten as its -// difficult to compare functions by object identity. -func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error { - if f == nil { - return fmt.Errorf("nil function cannot add overload: %s", overload.ID()) - } - for oID, o := range f.overloads { - if oID != overload.ID() && o.SignatureOverlaps(overload) { - return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID()) - } - if oID == overload.ID() { - if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() { - // Allow redefinition of an overload implementation so long as the signatures match. - f.overloads[oID] = overload - return nil - } - return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) - } - } - f.overloadOrdinals = append(f.overloadOrdinals, overload.ID()) - f.overloads[overload.ID()] = overload - return nil -} - -// OverloadDecls returns the overload declarations in the order in which they were declared. -func (f *FunctionDecl) OverloadDecls() []*OverloadDecl { - if f == nil { - return []*OverloadDecl{} - } - overloads := make([]*OverloadDecl, 0, len(f.overloads)) - for _, oID := range f.overloadOrdinals { - overloads = append(overloads, f.overloads[oID]) - } - return overloads -} - -// Bindings produces a set of function bindings, if any are defined. -func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { - if f == nil { - return []*functions.Overload{}, nil - } - overloads := []*functions.Overload{} - nonStrict := false - for _, oID := range f.overloadOrdinals { - o := f.overloads[oID] - if o.hasBinding() { - overload := &functions.Overload{ - Operator: o.ID(), - Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards), - Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards), - Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards), - OperandTrait: o.OperandTrait(), - NonStrict: o.IsNonStrict(), - } - overloads = append(overloads, overload) - nonStrict = nonStrict || o.IsNonStrict() - } - } - if f.singleton != nil { - if len(overloads) != 0 { - return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name()) - } - overloads = []*functions.Overload{ - { - Operator: f.Name(), - Unary: f.singleton.Unary, - Binary: f.singleton.Binary, - Function: f.singleton.Function, - OperandTrait: f.singleton.OperandTrait, - }, - } - // fall-through to return single overload case. - } - if len(overloads) == 0 { - return overloads, nil - } - // Single overload. Replicate an entry for it using the function name as well. 
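
A short, hedged illustration of Bindings(), building on the hypothetical newShoutFunction sketch above and assuming the "github.com/google/cel-go/common/functions" import for the Overload binding type (the same package deleted later in this diff).

// shoutBindings produces the runtime bindings for the single-overload "shout"
// declaration. Per the comment above, the result contains one entry keyed by
// the overload id and a replicated entry keyed by the function name itself.
func shoutBindings() ([]*functions.Overload, error) {
	shout, err := newShoutFunction()
	if err != nil {
		return nil, err
	}
	return shout.Bindings()
}
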
- if len(overloads) == 1 { - if overloads[0].Operator == f.Name() { - return overloads, nil - } - return append(overloads, &functions.Overload{ - Operator: f.Name(), - Unary: overloads[0].Unary, - Binary: overloads[0].Binary, - Function: overloads[0].Function, - NonStrict: overloads[0].NonStrict, - OperandTrait: overloads[0].OperandTrait, - }), nil - } - // All of the defined overloads are wrapped into a top-level function which - // performs dynamic dispatch to the proper overload based on the argument types. - bindings := append([]*functions.Overload{}, overloads...) - funcDispatch := func(args ...ref.Val) ref.Val { - for _, oID := range f.overloadOrdinals { - o := f.overloads[oID] - // During dynamic dispatch over multiple functions, signature agreement checks - // are preserved in order to assist with the function resolution step. - switch len(args) { - case 1: - if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { - return o.unaryOp(args[0]) - } - case 2: - if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { - return o.binaryOp(args[0], args[1]) - } - } - if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { - return o.functionOp(args...) - } - // eventually this will fall through to the noSuchOverload below. - } - return MaybeNoSuchOverload(f.Name(), args...) - } - function := &functions.Overload{ - Operator: f.Name(), - Function: funcDispatch, - NonStrict: nonStrict, - } - return append(bindings, function), nil -} - -// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or -// to return an unknown set, or to produce a new error for a missing function signature. -func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val { - argTypes := make([]string, len(args)) - var unk *types.Unknown = nil - for i, arg := range args { - if types.IsError(arg) { - return arg - } - if types.IsUnknown(arg) { - unk = types.MergeUnknowns(arg.(*types.Unknown), unk) - } - argTypes[i] = arg.Type().TypeName() - } - if unk != nil { - return unk - } - signature := strings.Join(argTypes, ", ") - return types.NewErr("no such overload: %s(%s)", funcName, signature) -} - -// FunctionOpt defines a functional option for mutating a function declaration. -type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error) - -// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls. -// Type guards remain on during dynamic dispatch for parsed-only expressions. -func DisableTypeGuards(value bool) FunctionOpt { - return func(fn *FunctionDecl) (*FunctionDecl, error) { - fn.disableTypeGuards = value - return fn, nil - } -} - -// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function -// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations -// of function declarations while still preserving the runtime behavior for previously compiled expressions. -func DisableDeclaration(value bool) FunctionOpt { - return func(fn *FunctionDecl) (*FunctionDecl, error) { - if value { - fn.state = declarationDisabled - } else { - fn.state = declarationEnabled - } - return fn, nil - } -} - -// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. -// -// Note, this approach works well if operand is expected to have a specific trait which it implements, -// e.g. traits.ContainerType. 
Otherwise, prefer per-overload function bindings. -func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *FunctionDecl) (*FunctionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) - } - f.singleton = &functions.Overload{ - Operator: f.Name(), - Unary: fn, - OperandTrait: trait, - } - return f, nil - } -} - -// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. -// -// Note, this approach works well if operand is expected to have a specific trait which it implements, -// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. -func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *FunctionDecl) (*FunctionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) - } - f.singleton = &functions.Overload{ - Operator: f.Name(), - Binary: fn, - OperandTrait: trait, - } - return f, nil - } -} - -// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. -// -// Note, this approach works well if operand is expected to have a specific trait which it implements, -// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. -func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { - trait := 0 - for _, t := range traits { - trait = trait | t - } - return func(f *FunctionDecl) (*FunctionDecl, error) { - if f.singleton != nil { - return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) - } - f.singleton = &functions.Overload{ - Operator: f.Name(), - Function: fn, - OperandTrait: trait, - } - return f, nil - } -} - -// Overload defines a new global overload with an overload id, argument types, and result type. Through the -// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to -// be non-strict. -// -// Note: function bindings should be commonly configured with Overload instances whereas operand traits and -// strict-ness should be rare occurrences. -func Overload(overloadID string, - args []*types.Type, resultType *types.Type, - opts ...OverloadOpt) FunctionOpt { - return newOverload(overloadID, false, args, resultType, opts...) -} - -// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, -// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding, -// an operand trait, and to be non-strict. -// -// Note: function bindings should be commonly configured with Overload instances whereas operand traits and -// strict-ness should be rare occurrences. -func MemberOverload(overloadID string, - args []*types.Type, resultType *types.Type, - opts ...OverloadOpt) FunctionOpt { - return newOverload(overloadID, true, args, resultType, opts...) -} - -func newOverload(overloadID string, - memberFunction bool, args []*types.Type, resultType *types.Type, - opts ...OverloadOpt) FunctionOpt { - return func(f *FunctionDecl) (*FunctionDecl, error) { - overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...) 
- if err != nil { - return nil, err - } - err = f.AddOverload(overload) - if err != nil { - return nil, err - } - return f, nil - } -} - -func newOverloadInternal(overloadID string, - memberFunction bool, args []*types.Type, resultType *types.Type, - opts ...OverloadOpt) (*OverloadDecl, error) { - overload := &OverloadDecl{ - id: overloadID, - argTypes: args, - resultType: resultType, - isMemberFunction: memberFunction, - } - var err error - for _, opt := range opts { - overload, err = opt(overload) - if err != nil { - return nil, err - } - } - return overload, nil -} - -// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional -// implementation. -type OverloadDecl struct { - id string - argTypes []*types.Type - resultType *types.Type - isMemberFunction bool - // nonStrict indicates that the function will accept error and unknown arguments as inputs. - nonStrict bool - // operandTrait indicates whether the member argument should have a specific type-trait. - // - // This is useful for creating overloads which operate on a type-interface rather than a concrete type. - operandTrait int - - // Function implementation options. Optional, but encouraged. - // unaryOp is a function binding that takes a single argument. - unaryOp functions.UnaryOp - // binaryOp is a function binding that takes two arguments. - binaryOp functions.BinaryOp - // functionOp is a catch-all for zero-arity and three-plus arity functions. - functionOp functions.FunctionOp -} - -// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker -// and interpreter to optimize performance. -// -// The ID format is usually one of two styles: -// global: __ -// member: ___ -func (o *OverloadDecl) ID() string { - if o == nil { - return "" - } - return o.id -} - -// ArgTypes contains the set of argument types expected by the overload. -// -// For member functions ArgTypes[0] represents the member operand type. -func (o *OverloadDecl) ArgTypes() []*types.Type { - if o == nil { - return emptyArgs - } - return o.argTypes -} - -// IsMemberFunction indicates whether the overload is a member function -func (o *OverloadDecl) IsMemberFunction() bool { - if o == nil { - return false - } - return o.isMemberFunction -} - -// IsNonStrict returns whether the overload accepts errors and unknown values as arguments. -func (o *OverloadDecl) IsNonStrict() bool { - if o == nil { - return false - } - return o.nonStrict -} - -// OperandTrait returns the trait mask of the first operand to the overload call, e.g. -// `traits.Indexer` -func (o *OverloadDecl) OperandTrait() int { - if o == nil { - return 0 - } - return o.operandTrait -} - -// ResultType indicates the output type from calling the function. -func (o *OverloadDecl) ResultType() *types.Type { - if o == nil { - // *types.Type is nil-safe - return nil - } - return o.resultType -} - -// TypeParams returns the type parameter names associated with the overload. -func (o *OverloadDecl) TypeParams() []string { - typeParams := map[string]struct{}{} - collectParamNames(typeParams, o.ResultType()) - for _, arg := range o.ArgTypes() { - collectParamNames(typeParams, arg) - } - params := make([]string, 0, len(typeParams)) - for param := range typeParams { - params = append(params, param) - } - return params -} - -// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature. 
-// -// Result type, operand trait, and strict-ness are not considered as part of signature equality. -func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool { - if o == other { - return true - } - if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) { - return false - } - for i, at := range o.ArgTypes() { - oat := other.ArgTypes()[i] - if !at.IsEquivalentType(oat) { - return false - } - } - return o.ResultType().IsEquivalentType(other.ResultType()) -} - -// SignatureOverlaps indicates whether two functions have non-equal, but overloapping function signatures. -// -// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type. -func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool { - if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) { - return false - } - argsOverlap := true - for i, argType := range o.ArgTypes() { - otherArgType := other.ArgTypes()[i] - argsOverlap = argsOverlap && - (argType.IsAssignableType(otherArgType) || - otherArgType.IsAssignableType(argType)) - } - return argsOverlap -} - -// hasBinding indicates whether the overload already has a definition. -func (o *OverloadDecl) hasBinding() bool { - return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil) -} - -// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined. -func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp { - if o.unaryOp == nil { - return nil - } - return func(arg ref.Val) ref.Val { - if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) { - return MaybeNoSuchOverload(funcName, arg) - } - return o.unaryOp(arg) - } -} - -// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined. -func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp { - if o.binaryOp == nil { - return nil - } - return func(arg1, arg2 ref.Val) ref.Val { - if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) { - return MaybeNoSuchOverload(funcName, arg1, arg2) - } - return o.binaryOp(arg1, arg2) - } -} - -// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided. -func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp { - if o.functionOp == nil { - return nil - } - return func(args ...ref.Val) ref.Val { - if !o.matchesRuntimeSignature(disableTypeGuards, args...) { - return MaybeNoSuchOverload(funcName, args...) - } - return o.functionOp(args...) - } -} - -// matchesRuntimeUnarySignature indicates whether the argument type is runtime assiganble to the overload's expected argument. -func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool { - return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) && - matchOperandTrait(o.OperandTrait(), arg) -} - -// matchesRuntimeBinarySignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. 
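
A hedged sketch of the overlap check: per the comment above, list(dyn) collides with list(string), so declaring both overloads on one function fails when the second overload is added (here via NewFunction). Names and ids are invented; imports are as in the earlier sketch.

// overlappingOverloads demonstrates the signature-collision error path.
func overlappingOverloads() error {
	_, err := decls.NewFunction("first",
		decls.Overload("first_list_string",
			[]*types.Type{types.NewListType(types.StringType)}, types.StringType),
		decls.Overload("first_list_dyn",
			[]*types.Type{types.NewListType(types.DynType)}, types.DynType))
	// err: overload signature collision in function first:
	//      first_list_string collides with first_list_dyn
	return err
}
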
-func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool { - return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) && - matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) && - matchOperandTrait(o.OperandTrait(), arg1) -} - -// matchesRuntimeSignature indicates whether the argument types are runtime assiganble to the overload's expected arguments. -func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool { - if len(args) != len(o.ArgTypes()) { - return false - } - if len(args) == 0 { - return true - } - for i, arg := range args { - if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) { - return false - } - } - return matchOperandTrait(o.OperandTrait(), args[0]) -} - -func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool { - if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) { - return true - } - if types.IsUnknownOrError(arg) { - return false - } - return disableTypeGuards || argType.IsAssignableRuntimeType(arg) -} - -func matchOperandTrait(trait int, arg ref.Val) bool { - return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg) -} - -// OverloadOpt is a functional option for configuring a function overload. -type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error) - -// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime -// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. -func UnaryBinding(binding functions.UnaryOp) OverloadOpt { - return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) - } - if len(o.ArgTypes()) != 1 { - return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID()) - } - o.unaryOp = binding - return o, nil - } -} - -// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime -// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. -func BinaryBinding(binding functions.BinaryOp) OverloadOpt { - return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) - } - if len(o.ArgTypes()) != 2 { - return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID()) - } - o.binaryOp = binding - return o, nil - } -} - -// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime -// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. -func FunctionBinding(binding functions.FunctionOp) OverloadOpt { - return func(o *OverloadDecl) (*OverloadDecl, error) { - if o.hasBinding() { - return nil, fmt.Errorf("overload already has a binding: %s", o.ID()) - } - o.functionOp = binding - return o, nil - } -} - -// OverloadIsNonStrict enables the function to be called with error and unknown argument values. -// -// Note: do not use this option unless absoluately necessary as it should be an uncommon feature. 
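
A hedged example of OverloadIsNonStrict combined with UnaryBinding: the hypothetical overload below observes unknown and error values instead of having them propagate automatically. Imports are as in the earlier sketch.

// newOrTrueFunction treats error/unknown input as true; invented for illustration.
func newOrTrueFunction() (*decls.FunctionDecl, error) {
	return decls.NewFunction("orTrue",
		decls.Overload("orTrue_bool",
			[]*types.Type{types.BoolType}, types.BoolType,
			decls.OverloadIsNonStrict(),
			decls.UnaryBinding(func(v ref.Val) ref.Val {
				if types.IsUnknownOrError(v) {
					return types.True
				}
				return v
			})))
}
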
-func OverloadIsNonStrict() OverloadOpt { - return func(o *OverloadDecl) (*OverloadDecl, error) { - o.nonStrict = true - return o, nil - } -} - -// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be -// successfully invoked. -func OverloadOperandTrait(trait int) OverloadOpt { - return func(o *OverloadDecl) (*OverloadDecl, error) { - o.operandTrait = trait - return o, nil - } -} - -// NewConstant creates a new constant declaration. -func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl { - return &VariableDecl{name: name, varType: t, value: v} -} - -// NewVariable creates a new variable declaration. -func NewVariable(name string, t *types.Type) *VariableDecl { - return &VariableDecl{name: name, varType: t} -} - -// VariableDecl defines a variable declaration which may optionally have a constant value. -type VariableDecl struct { - name string - varType *types.Type - value ref.Val -} - -// Name returns the fully-qualified variable name -func (v *VariableDecl) Name() string { - if v == nil { - return "" - } - return v.name -} - -// Type returns the types.Type value associated with the variable. -func (v *VariableDecl) Type() *types.Type { - if v == nil { - // types.Type is nil-safe - return nil - } - return v.varType -} - -// Value returns the constant value associated with the declaration. -func (v *VariableDecl) Value() ref.Val { - if v == nil { - return nil - } - return v.value -} - -// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input. -func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { - if v == other { - return true - } - return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) -} - -// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. -func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { - varType, err := types.TypeToExprType(v.Type()) - if err != nil { - return nil, err - } - return chkdecls.NewVar(v.Name(), varType), nil -} - -// TypeVariable creates a new type identifier for use within a types.Provider -func TypeVariable(t *types.Type) *VariableDecl { - return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) -} - -// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. 
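
A brief, hedged sketch of the variable and constant declaration helpers removed above, plus the protobuf conversion consumed by the type-checker. The names are invented; imports are as in the earlier sketch.

// declareRequestHost shows NewVariable, NewConstant, and VariableDeclToExprDecl.
func declareRequestHost() error {
	host := decls.NewVariable("request.host", types.StringType)
	maxRetries := decls.NewConstant("config.maxRetries", types.IntType, types.Int(3))
	if _, err := decls.VariableDeclToExprDecl(host); err != nil {
		return err
	}
	_, err := decls.VariableDeclToExprDecl(maxRetries)
	return err
}
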
-func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { - overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) - for i, oID := range f.overloadOrdinals { - o := f.overloads[oID] - paramNames := map[string]struct{}{} - argTypes := make([]*exprpb.Type, len(o.ArgTypes())) - for j, a := range o.ArgTypes() { - collectParamNames(paramNames, a) - at, err := types.TypeToExprType(a) - if err != nil { - return nil, err - } - argTypes[j] = at - } - collectParamNames(paramNames, o.ResultType()) - resultType, err := types.TypeToExprType(o.ResultType()) - if err != nil { - return nil, err - } - if len(paramNames) == 0 { - if o.IsMemberFunction() { - overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType) - } else { - overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType) - } - } else { - params := []string{} - for pn := range paramNames { - params = append(params, pn) - } - if o.IsMemberFunction() { - overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params) - } else { - overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params) - } - } - } - return chkdecls.NewFunction(f.Name(), overloads...), nil -} - -func collectParamNames(paramNames map[string]struct{}, arg *types.Type) { - if arg.Kind() == types.TypeParamKind { - paramNames[arg.TypeName()] = struct{}{} - } - for _, param := range arg.Parameters() { - collectParamNames(paramNames, param) - } -} - -var ( - emptyArgs = []*types.Type{} -) diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go index 774dcb5b48a..f91f7f8d109 100644 --- a/vendor/github.com/google/cel-go/common/error.go +++ b/vendor/github.com/google/cel-go/common/error.go @@ -22,16 +22,10 @@ import ( "golang.org/x/text/width" ) -// NewError creates an error associated with an expression id with the given message at the given location. -func NewError(id int64, message string, location Location) *Error { - return &Error{Message: message, Location: location, ExprID: id} -} - -// Error type which references an expression id, a location within source, and a message. +// Error type which references a location within source and a message. type Error struct { Location Location Message string - ExprID int64 } const ( diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go index 63919714ea7..1565085ab9a 100644 --- a/vendor/github.com/google/cel-go/common/errors.go +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -22,7 +22,7 @@ import ( // Errors type which contains a list of errors observed during parsing. type Errors struct { - errors []*Error + errors []Error source Source numErrors int maxErrorsToReport int @@ -31,7 +31,7 @@ type Errors struct { // NewErrors creates a new instance of the Errors type. func NewErrors(source Source) *Errors { return &Errors{ - errors: []*Error{}, + errors: []Error{}, source: source, maxErrorsToReport: 100, } @@ -39,17 +39,11 @@ func NewErrors(source Source) *Errors { // ReportError records an error at a source location. func (e *Errors) ReportError(l Location, format string, args ...any) { - e.ReportErrorAtID(0, l, format, args...) -} - -// ReportErrorAtID records an error at a source location and expression id. 
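
Call-site impact of the errors.go change above, shown as a hedged sketch: with ExprID and ReportErrorAtID removed in this vendored version, only location-based reporting remains, and GetErrors yields Error values rather than pointers. common.NewLocation is assumed to exist in the same common package; the source text is invented.

package example

import "github.com/google/cel-go/common"

// reportParseError records an error against a source location only; the
// expression-id variant no longer exists in this vendored copy.
func reportParseError() []common.Error {
	errs := common.NewErrors(common.NewStringSource("a + b", "<test>"))
	errs.ReportError(common.NewLocation(1, 2), "unsupported operand: %s", "+")
	return errs.GetErrors() // []common.Error, not []*common.Error, after this change
}
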
-func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { e.numErrors++ if e.numErrors > e.maxErrorsToReport { return } - err := &Error{ - ExprID: id, + err := Error{ Location: l, Message: fmt.Sprintf(format, args...), } @@ -57,12 +51,12 @@ func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...an } // GetErrors returns the list of observed errors. -func (e *Errors) GetErrors() []*Error { +func (e *Errors) GetErrors() []Error { return e.errors[:] } // Append creates a new Errors object with the current and input errors. -func (e *Errors) Append(errs []*Error) *Errors { +func (e *Errors) Append(errs []Error) *Errors { return &Errors{ errors: append(e.errors, errs...), source: e.source, diff --git a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel deleted file mode 100644 index 3cc27d60ce3..00000000000 --- a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "functions.go", - ], - importpath = "github.com/google/cel-go/common/functions", - deps = [ - "//common/types/ref:go_default_library", - ], -) diff --git a/vendor/github.com/google/cel-go/common/functions/functions.go b/vendor/github.com/google/cel-go/common/functions/functions.go deleted file mode 100644 index 67f4a5944e1..00000000000 --- a/vendor/github.com/google/cel-go/common/functions/functions.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package functions defines the standard builtin functions supported by the interpreter -package functions - -import "github.com/google/cel-go/common/types/ref" - -// Overload defines a named overload of a function, indicating an operand trait -// which must be present on the first argument to the overload as well as one -// of either a unary, binary, or function implementation. -// -// The majority of operators within the expression language are unary or binary -// and the specializations simplify the call contract for implementers of -// types with operator overloads. Any added complexity is assumed to be handled -// by the generic FunctionOp. -type Overload struct { - // Operator name as written in an expression or defined within - // operators.go. - Operator string - - // Operand trait used to dispatch the call. The zero-value indicates a - // global function overload or that one of the Unary / Binary / Function - // definitions should be used to execute the call. - OperandTrait int - - // Unary defines the overload with a UnaryOp implementation. May be nil. - Unary UnaryOp - - // Binary defines the overload with a BinaryOp implementation. May be nil. - Binary BinaryOp - - // Function defines the overload with a FunctionOp implementation. 
May be - // nil. - Function FunctionOp - - // NonStrict specifies whether the Overload will tolerate arguments that - // are types.Err or types.Unknown. - NonStrict bool -} - -// UnaryOp is a function that takes a single value and produces an output. -type UnaryOp func(value ref.Val) ref.Val - -// BinaryOp is a function that takes two values and produces an output. -type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val - -// FunctionOp is a function with accepts zero or more arguments and produces -// a value or error as a result. -type FunctionOp func(values ...ref.Val) ref.Val diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go index acf22bdf15e..52377d9308a 100644 --- a/vendor/github.com/google/cel-go/common/source.go +++ b/vendor/github.com/google/cel-go/common/source.go @@ -64,6 +64,7 @@ type sourceImpl struct { runes.Buffer description string lineOffsets []int32 + idOffsets map[int64]int32 } var _ runes.Buffer = &sourceImpl{} @@ -91,6 +92,7 @@ func NewStringSource(contents string, description string) Source { Buffer: runes.NewBuffer(contents), description: description, lineOffsets: offsets, + idOffsets: map[int64]int32{}, } } @@ -100,6 +102,7 @@ func NewInfoSource(info *exprpb.SourceInfo) Source { Buffer: runes.NewBuffer(""), description: info.GetLocation(), lineOffsets: info.GetLineOffsets(), + idOffsets: info.GetPositions(), } } diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel deleted file mode 100644 index c130a93f63f..00000000000 --- a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel +++ /dev/null @@ -1,25 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package( - default_visibility = ["//visibility:public"], - licenses = ["notice"], # Apache 2.0 -) - -go_library( - name = "go_default_library", - srcs = [ - "standard.go", - ], - importpath = "github.com/google/cel-go/common/stdlib", - deps = [ - "//checker/decls:go_default_library", - "//common/decls:go_default_library", - "//common/functions:go_default_library", - "//common/operators:go_default_library", - "//common/overloads:go_default_library", - "//common/types:go_default_library", - "//common/types/ref:go_default_library", - "//common/types/traits:go_default_library", - "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", - ], -) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go deleted file mode 100644 index d02cb64bf1f..00000000000 --- a/vendor/github.com/google/cel-go/common/stdlib/standard.go +++ /dev/null @@ -1,661 +0,0 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stdlib contains all of the standard library function declarations and definitions for CEL. 
-package stdlib - -import ( - "github.com/google/cel-go/common/decls" - "github.com/google/cel-go/common/functions" - "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/overloads" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -var ( - stdFunctions []*decls.FunctionDecl - stdFnDecls []*exprpb.Decl - stdTypes []*decls.VariableDecl - stdTypeDecls []*exprpb.Decl -) - -func init() { - paramA := types.NewTypeParamType("A") - paramB := types.NewTypeParamType("B") - listOfA := types.NewListType(paramA) - mapOfAB := types.NewMapType(paramA, paramB) - - stdTypes = []*decls.VariableDecl{ - decls.TypeVariable(types.BoolType), - decls.TypeVariable(types.BytesType), - decls.TypeVariable(types.DoubleType), - decls.TypeVariable(types.DurationType), - decls.TypeVariable(types.IntType), - decls.TypeVariable(listOfA), - decls.TypeVariable(mapOfAB), - decls.TypeVariable(types.NullType), - decls.TypeVariable(types.StringType), - decls.TypeVariable(types.TimestampType), - decls.TypeVariable(types.TypeType), - decls.TypeVariable(types.UintType), - } - - stdTypeDecls = make([]*exprpb.Decl, 0, len(stdTypes)) - for _, stdType := range stdTypes { - typeVar, err := decls.VariableDeclToExprDecl(stdType) - if err != nil { - panic(err) - } - stdTypeDecls = append(stdTypeDecls, typeVar) - } - - stdFunctions = []*decls.FunctionDecl{ - // Logical operators. Special-cased within the interpreter. - // Note, the singleton binding prevents extensions from overriding the operator behavior. - function(operators.Conditional, - decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA, - decls.OverloadIsNonStrict()), - decls.SingletonFunctionBinding(noFunctionOverrides)), - function(operators.LogicalAnd, - decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType, - decls.OverloadIsNonStrict()), - decls.SingletonBinaryBinding(noBinaryOverrides)), - function(operators.LogicalOr, - decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType, - decls.OverloadIsNonStrict()), - decls.SingletonBinaryBinding(noBinaryOverrides)), - function(operators.LogicalNot, - decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType), - decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { - b, ok := val.(types.Bool) - if !ok { - return types.MaybeNoSuchOverloadErr(val) - } - return b.Negate() - })), - - // Comprehension short-circuiting related function - function(operators.NotStrictlyFalse, - decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType, - decls.OverloadIsNonStrict(), - decls.UnaryBinding(notStrictlyFalse))), - // Deprecated: __not_strictly_false__ - function(operators.OldNotStrictlyFalse, - decls.DisableDeclaration(true), // safe deprecation - decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType, - decls.OverloadIsNonStrict(), - decls.UnaryBinding(notStrictlyFalse))), - - // Equality / inequality. 
Special-cased in the interpreter - function(operators.Equals, - decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType), - decls.SingletonBinaryBinding(noBinaryOverrides)), - function(operators.NotEquals, - decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType), - decls.SingletonBinaryBinding(noBinaryOverrides)), - - // Mathematical operators - function(operators.Add, - decls.Overload(overloads.AddBytes, - argTypes(types.BytesType, types.BytesType), types.BytesType), - decls.Overload(overloads.AddDouble, - argTypes(types.DoubleType, types.DoubleType), types.DoubleType), - decls.Overload(overloads.AddDurationDuration, - argTypes(types.DurationType, types.DurationType), types.DurationType), - decls.Overload(overloads.AddDurationTimestamp, - argTypes(types.DurationType, types.TimestampType), types.TimestampType), - decls.Overload(overloads.AddTimestampDuration, - argTypes(types.TimestampType, types.DurationType), types.TimestampType), - decls.Overload(overloads.AddInt64, - argTypes(types.IntType, types.IntType), types.IntType), - decls.Overload(overloads.AddList, - argTypes(listOfA, listOfA), listOfA), - decls.Overload(overloads.AddString, - argTypes(types.StringType, types.StringType), types.StringType), - decls.Overload(overloads.AddUint64, - argTypes(types.UintType, types.UintType), types.UintType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Adder).Add(rhs) - }, traits.AdderType)), - function(operators.Divide, - decls.Overload(overloads.DivideDouble, - argTypes(types.DoubleType, types.DoubleType), types.DoubleType), - decls.Overload(overloads.DivideInt64, - argTypes(types.IntType, types.IntType), types.IntType), - decls.Overload(overloads.DivideUint64, - argTypes(types.UintType, types.UintType), types.UintType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Divider).Divide(rhs) - }, traits.DividerType)), - function(operators.Modulo, - decls.Overload(overloads.ModuloInt64, - argTypes(types.IntType, types.IntType), types.IntType), - decls.Overload(overloads.ModuloUint64, - argTypes(types.UintType, types.UintType), types.UintType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Modder).Modulo(rhs) - }, traits.ModderType)), - function(operators.Multiply, - decls.Overload(overloads.MultiplyDouble, - argTypes(types.DoubleType, types.DoubleType), types.DoubleType), - decls.Overload(overloads.MultiplyInt64, - argTypes(types.IntType, types.IntType), types.IntType), - decls.Overload(overloads.MultiplyUint64, - argTypes(types.UintType, types.UintType), types.UintType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Multiplier).Multiply(rhs) - }, traits.MultiplierType)), - function(operators.Negate, - decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType), - decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType), - decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { - if types.IsBool(val) { - return types.MaybeNoSuchOverloadErr(val) - } - return val.(traits.Negater).Negate() - }, traits.NegatorType)), - function(operators.Subtract, - decls.Overload(overloads.SubtractDouble, - argTypes(types.DoubleType, types.DoubleType), types.DoubleType), - decls.Overload(overloads.SubtractDurationDuration, - argTypes(types.DurationType, types.DurationType), types.DurationType), - decls.Overload(overloads.SubtractInt64, - argTypes(types.IntType, types.IntType), 
types.IntType), - decls.Overload(overloads.SubtractTimestampDuration, - argTypes(types.TimestampType, types.DurationType), types.TimestampType), - decls.Overload(overloads.SubtractTimestampTimestamp, - argTypes(types.TimestampType, types.TimestampType), types.DurationType), - decls.Overload(overloads.SubtractUint64, - argTypes(types.UintType, types.UintType), types.UintType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Subtractor).Subtract(rhs) - }, traits.SubtractorType)), - - // Relations operators - - function(operators.Less, - decls.Overload(overloads.LessBool, - argTypes(types.BoolType, types.BoolType), types.BoolType), - decls.Overload(overloads.LessInt64, - argTypes(types.IntType, types.IntType), types.BoolType), - decls.Overload(overloads.LessInt64Double, - argTypes(types.IntType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessInt64Uint64, - argTypes(types.IntType, types.UintType), types.BoolType), - decls.Overload(overloads.LessUint64, - argTypes(types.UintType, types.UintType), types.BoolType), - decls.Overload(overloads.LessUint64Double, - argTypes(types.UintType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessUint64Int64, - argTypes(types.UintType, types.IntType), types.BoolType), - decls.Overload(overloads.LessDouble, - argTypes(types.DoubleType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessDoubleInt64, - argTypes(types.DoubleType, types.IntType), types.BoolType), - decls.Overload(overloads.LessDoubleUint64, - argTypes(types.DoubleType, types.UintType), types.BoolType), - decls.Overload(overloads.LessString, - argTypes(types.StringType, types.StringType), types.BoolType), - decls.Overload(overloads.LessBytes, - argTypes(types.BytesType, types.BytesType), types.BoolType), - decls.Overload(overloads.LessTimestamp, - argTypes(types.TimestampType, types.TimestampType), types.BoolType), - decls.Overload(overloads.LessDuration, - argTypes(types.DurationType, types.DurationType), types.BoolType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne { - return types.True - } - if cmp == types.IntOne || cmp == types.IntZero { - return types.False - } - return cmp - }, traits.ComparerType)), - - function(operators.LessEquals, - decls.Overload(overloads.LessEqualsBool, - argTypes(types.BoolType, types.BoolType), types.BoolType), - decls.Overload(overloads.LessEqualsInt64, - argTypes(types.IntType, types.IntType), types.BoolType), - decls.Overload(overloads.LessEqualsInt64Double, - argTypes(types.IntType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessEqualsInt64Uint64, - argTypes(types.IntType, types.UintType), types.BoolType), - decls.Overload(overloads.LessEqualsUint64, - argTypes(types.UintType, types.UintType), types.BoolType), - decls.Overload(overloads.LessEqualsUint64Double, - argTypes(types.UintType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessEqualsUint64Int64, - argTypes(types.UintType, types.IntType), types.BoolType), - decls.Overload(overloads.LessEqualsDouble, - argTypes(types.DoubleType, types.DoubleType), types.BoolType), - decls.Overload(overloads.LessEqualsDoubleInt64, - argTypes(types.DoubleType, types.IntType), types.BoolType), - decls.Overload(overloads.LessEqualsDoubleUint64, - argTypes(types.DoubleType, types.UintType), types.BoolType), - decls.Overload(overloads.LessEqualsString, - argTypes(types.StringType, types.StringType), 
types.BoolType), - decls.Overload(overloads.LessEqualsBytes, - argTypes(types.BytesType, types.BytesType), types.BoolType), - decls.Overload(overloads.LessEqualsTimestamp, - argTypes(types.TimestampType, types.TimestampType), types.BoolType), - decls.Overload(overloads.LessEqualsDuration, - argTypes(types.DurationType, types.DurationType), types.BoolType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntOne { - return types.False - } - return cmp - }, traits.ComparerType)), - - function(operators.Greater, - decls.Overload(overloads.GreaterBool, - argTypes(types.BoolType, types.BoolType), types.BoolType), - decls.Overload(overloads.GreaterInt64, - argTypes(types.IntType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterInt64Double, - argTypes(types.IntType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterInt64Uint64, - argTypes(types.IntType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterUint64, - argTypes(types.UintType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterUint64Double, - argTypes(types.UintType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterUint64Int64, - argTypes(types.UintType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterDouble, - argTypes(types.DoubleType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterDoubleInt64, - argTypes(types.DoubleType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterDoubleUint64, - argTypes(types.DoubleType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterString, - argTypes(types.StringType, types.StringType), types.BoolType), - decls.Overload(overloads.GreaterBytes, - argTypes(types.BytesType, types.BytesType), types.BoolType), - decls.Overload(overloads.GreaterTimestamp, - argTypes(types.TimestampType, types.TimestampType), types.BoolType), - decls.Overload(overloads.GreaterDuration, - argTypes(types.DurationType, types.DurationType), types.BoolType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne { - return types.True - } - if cmp == types.IntNegOne || cmp == types.IntZero { - return types.False - } - return cmp - }, traits.ComparerType)), - - function(operators.GreaterEquals, - decls.Overload(overloads.GreaterEqualsBool, - argTypes(types.BoolType, types.BoolType), types.BoolType), - decls.Overload(overloads.GreaterEqualsInt64, - argTypes(types.IntType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterEqualsInt64Double, - argTypes(types.IntType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterEqualsInt64Uint64, - argTypes(types.IntType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterEqualsUint64, - argTypes(types.UintType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterEqualsUint64Double, - argTypes(types.UintType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterEqualsUint64Int64, - argTypes(types.UintType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterEqualsDouble, - argTypes(types.DoubleType, types.DoubleType), types.BoolType), - decls.Overload(overloads.GreaterEqualsDoubleInt64, - argTypes(types.DoubleType, types.IntType), types.BoolType), - decls.Overload(overloads.GreaterEqualsDoubleUint64, 
- argTypes(types.DoubleType, types.UintType), types.BoolType), - decls.Overload(overloads.GreaterEqualsString, - argTypes(types.StringType, types.StringType), types.BoolType), - decls.Overload(overloads.GreaterEqualsBytes, - argTypes(types.BytesType, types.BytesType), types.BoolType), - decls.Overload(overloads.GreaterEqualsTimestamp, - argTypes(types.TimestampType, types.TimestampType), types.BoolType), - decls.Overload(overloads.GreaterEqualsDuration, - argTypes(types.DurationType, types.DurationType), types.BoolType), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - cmp := lhs.(traits.Comparer).Compare(rhs) - if cmp == types.IntOne || cmp == types.IntZero { - return types.True - } - if cmp == types.IntNegOne { - return types.False - } - return cmp - }, traits.ComparerType)), - - // Indexing - function(operators.Index, - decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA), - decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB), - decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { - return lhs.(traits.Indexer).Get(rhs) - }, traits.IndexerType)), - - // Collections operators - function(operators.In, - decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), - decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), - decls.SingletonBinaryBinding(inAggregate)), - function(operators.OldIn, - decls.DisableDeclaration(true), // safe deprecation - decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), - decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), - decls.SingletonBinaryBinding(inAggregate)), - function(overloads.DeprecatedIn, - decls.DisableDeclaration(true), // safe deprecation - decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), - decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), - decls.SingletonBinaryBinding(inAggregate)), - function(overloads.Size, - decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType), - decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType), - decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType), - decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType), - decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType), - decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType), - decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType), - decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType), - decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { - return val.(traits.Sizer).Size() - }, traits.SizerType)), - - // Type conversions - function(overloads.TypeConvertType, - decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)), - decls.SingletonUnaryBinding(convertToType(types.TypeType))), - - // Bool conversions - function(overloads.TypeConvertBool, - decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType, - decls.UnaryBinding(convertToType(types.BoolType)))), - - // Bytes conversions - function(overloads.TypeConvertBytes, - decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.StringToBytes, 
argTypes(types.StringType), types.BytesType, - decls.UnaryBinding(convertToType(types.BytesType)))), - - // Double conversions - function(overloads.TypeConvertDouble, - decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType, - decls.UnaryBinding(convertToType(types.DoubleType))), - decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType, - decls.UnaryBinding(convertToType(types.DoubleType))), - decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType, - decls.UnaryBinding(convertToType(types.DoubleType)))), - - // Duration conversions - function(overloads.TypeConvertDuration, - decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType, - decls.UnaryBinding(convertToType(types.DurationType))), - decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType, - decls.UnaryBinding(convertToType(types.DurationType)))), - - // Dyn conversions - function(overloads.TypeConvertDyn, - decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType), - decls.SingletonUnaryBinding(identity)), - - // Int conversions - function(overloads.TypeConvertInt, - decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType, - decls.UnaryBinding(convertToType(types.IntType))), - decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType, - decls.UnaryBinding(convertToType(types.IntType))), - decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType, - decls.UnaryBinding(convertToType(types.IntType))), - decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType, - decls.UnaryBinding(convertToType(types.IntType))), - decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType, - decls.UnaryBinding(convertToType(types.IntType))), - ), - - // String conversions - function(overloads.TypeConvertString, - decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType))), - decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType, - decls.UnaryBinding(convertToType(types.StringType)))), - - // Timestamp conversions - function(overloads.TypeConvertTimestamp, - 
decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType, - decls.UnaryBinding(convertToType(types.TimestampType))), - decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType, - decls.UnaryBinding(convertToType(types.TimestampType)))), - - // Uint conversions - function(overloads.TypeConvertUint, - decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType, - decls.UnaryBinding(identity)), - decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType, - decls.UnaryBinding(convertToType(types.UintType))), - decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType, - decls.UnaryBinding(convertToType(types.UintType))), - decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType, - decls.UnaryBinding(convertToType(types.UintType)))), - - // String functions - function(overloads.Contains, - decls.MemberOverload(overloads.ContainsString, - argTypes(types.StringType, types.StringType), types.BoolType, - decls.BinaryBinding(types.StringContains)), - decls.DisableTypeGuards(true)), - function(overloads.EndsWith, - decls.MemberOverload(overloads.EndsWithString, - argTypes(types.StringType, types.StringType), types.BoolType, - decls.BinaryBinding(types.StringEndsWith)), - decls.DisableTypeGuards(true)), - function(overloads.StartsWith, - decls.MemberOverload(overloads.StartsWithString, - argTypes(types.StringType, types.StringType), types.BoolType, - decls.BinaryBinding(types.StringStartsWith)), - decls.DisableTypeGuards(true)), - function(overloads.Matches, - decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType), - decls.MemberOverload(overloads.MatchesString, - argTypes(types.StringType, types.StringType), types.BoolType), - decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val { - return str.(traits.Matcher).Match(pat) - }, traits.MatcherType)), - - // Timestamp / duration functions - function(overloads.TimeGetFullYear, - decls.MemberOverload(overloads.TimestampToYear, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToYearWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - function(overloads.TimeGetMonth, - decls.MemberOverload(overloads.TimestampToMonth, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToMonthWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - function(overloads.TimeGetDayOfYear, - decls.MemberOverload(overloads.TimestampToDayOfYear, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToDayOfYearWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - function(overloads.TimeGetDayOfMonth, - decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - function(overloads.TimeGetDate, - decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - 
function(overloads.TimeGetDayOfWeek, - decls.MemberOverload(overloads.TimestampToDayOfWeek, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType)), - - function(overloads.TimeGetHours, - decls.MemberOverload(overloads.TimestampToHours, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToHoursWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType), - decls.MemberOverload(overloads.DurationToHours, - argTypes(types.DurationType), types.IntType)), - - function(overloads.TimeGetMinutes, - decls.MemberOverload(overloads.TimestampToMinutes, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToMinutesWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType), - decls.MemberOverload(overloads.DurationToMinutes, - argTypes(types.DurationType), types.IntType)), - - function(overloads.TimeGetSeconds, - decls.MemberOverload(overloads.TimestampToSeconds, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToSecondsWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType), - decls.MemberOverload(overloads.DurationToSeconds, - argTypes(types.DurationType), types.IntType)), - - function(overloads.TimeGetMilliseconds, - decls.MemberOverload(overloads.TimestampToMilliseconds, - argTypes(types.TimestampType), types.IntType), - decls.MemberOverload(overloads.TimestampToMillisecondsWithTz, - argTypes(types.TimestampType, types.StringType), types.IntType), - decls.MemberOverload(overloads.DurationToMilliseconds, - argTypes(types.DurationType), types.IntType)), - } - - stdFnDecls = make([]*exprpb.Decl, 0, len(stdFunctions)) - for _, fn := range stdFunctions { - if fn.IsDeclarationDisabled() { - continue - } - ed, err := decls.FunctionDeclToExprDecl(fn) - if err != nil { - panic(err) - } - stdFnDecls = append(stdFnDecls, ed) - } -} - -// Functions returns the set of standard library function declarations and definitions for CEL. -func Functions() []*decls.FunctionDecl { - return stdFunctions -} - -// FunctionExprDecls returns the legacy style protobuf-typed declarations for all functions and overloads -// in the CEL standard environment. -// -// Deprecated: use Functions -func FunctionExprDecls() []*exprpb.Decl { - return stdFnDecls -} - -// Types returns the set of standard library types for CEL. -func Types() []*decls.VariableDecl { - return stdTypes -} - -// TypeExprDecls returns the legacy style protobuf-typed declarations for all types in the CEL -// standard environment. -// -// Deprecated: use Types -func TypeExprDecls() []*exprpb.Decl { - return stdTypeDecls -} - -func notStrictlyFalse(value ref.Val) ref.Val { - if types.IsBool(value) { - return value - } - return types.True -} - -func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { - if rhs.Type().HasTrait(traits.ContainerType) { - return rhs.(traits.Container).Contains(lhs) - } - return types.ValOrErr(rhs, "no such overload") -} - -func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl { - fn, err := decls.NewFunction(name, opts...) 
- if err != nil { - panic(err) - } - return fn -} - -func argTypes(args ...*types.Type) []*types.Type { - return args -} - -func noBinaryOverrides(rhs, lhs ref.Val) ref.Val { - return types.NoSuchOverloadErr() -} - -func noFunctionOverrides(args ...ref.Val) ref.Val { - return types.NoSuchOverloadErr() -} - -func identity(val ref.Val) ref.Val { - return val -} - -func convertToType(t ref.Type) functions.UnaryOp { - return func(val ref.Val) ref.Val { - return val.ConvertToType(t) - } -} diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel index b5e44ffbf14..89c4feacbfc 100644 --- a/vendor/github.com/google/cel-go/common/types/BUILD.bazel +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -27,20 +27,20 @@ go_library( "provider.go", "string.go", "timestamp.go", - "types.go", + "type.go", "uint.go", "unknown.go", "util.go", ], importpath = "github.com/google/cel-go/common/types", deps = [ - "//checker/decls:go_default_library", "//common/overloads:go_default_library", "//common/types/pb:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", "@com_github_stoewer_go_strcase//:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", "@org_golang_google_protobuf//encoding/protojson:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", @@ -71,9 +71,8 @@ go_test( "provider_test.go", "string_test.go", "timestamp_test.go", - "types_test.go", + "type_test.go", "uint_test.go", - "unknown_test.go", "util_test.go", ], embed = [":go_default_library"], diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go index 565734f3ff5..a634ecc2879 100644 --- a/vendor/github.com/google/cel-go/common/types/bool.go +++ b/vendor/github.com/google/cel-go/common/types/bool.go @@ -20,6 +20,7 @@ import ( "strconv" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -30,6 +31,11 @@ import ( type Bool bool var ( + // BoolType singleton. + BoolType = NewTypeValue("bool", + traits.ComparerType, + traits.NegatorType) + // boolWrapperType golang reflected type for protobuf bool wrapper type. boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{}) ) diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go index 5838755f8b3..bef190759fd 100644 --- a/vendor/github.com/google/cel-go/common/types/bytes.go +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -22,6 +22,7 @@ import ( "unicode/utf8" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -33,6 +34,12 @@ import ( type Bytes []byte var ( + // BytesType singleton. + BytesType = NewTypeValue("bytes", + traits.AdderType, + traits.ComparerType, + traits.SizerType) + // byteWrapperType golang reflected type for protobuf bytes wrapper type. 
byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{}) ) diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go index 027e789786b..bda9f31a6bf 100644 --- a/vendor/github.com/google/cel-go/common/types/double.go +++ b/vendor/github.com/google/cel-go/common/types/double.go @@ -20,6 +20,7 @@ import ( "reflect" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -31,6 +32,15 @@ import ( type Double float64 var ( + // DoubleType singleton. + DoubleType = NewTypeValue("double", + traits.AdderType, + traits.ComparerType, + traits.DividerType, + traits.MultiplierType, + traits.NegatorType, + traits.SubtractorType) + // doubleWrapperType reflected type for protobuf double wrapper type. doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{}) diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go index 596e56d6b03..c90ac1bee95 100644 --- a/vendor/github.com/google/cel-go/common/types/duration.go +++ b/vendor/github.com/google/cel-go/common/types/duration.go @@ -22,6 +22,7 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" dpb "google.golang.org/protobuf/types/known/durationpb" @@ -40,14 +41,13 @@ func durationOf(d time.Duration) Duration { } var ( - durationValueType = reflect.TypeOf(&dpb.Duration{}) - - durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{ - overloads.TimeGetHours: DurationGetHours, - overloads.TimeGetMinutes: DurationGetMinutes, - overloads.TimeGetSeconds: DurationGetSeconds, - overloads.TimeGetMilliseconds: DurationGetMilliseconds, - } + // DurationType singleton. + DurationType = NewTypeValue("google.protobuf.Duration", + traits.AdderType, + traits.ComparerType, + traits.NegatorType, + traits.ReceiverType, + traits.SubtractorType) ) // Add implements traits.Adder.Add. @@ -156,7 +156,7 @@ func (d Duration) Negate() ref.Val { func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val { if len(args) == 0 { if f, found := durationZeroArgOverloads[function]; found { - return f(d) + return f(d.Duration) } } return NoSuchOverloadErr() @@ -185,38 +185,20 @@ func (d Duration) Value() any { return d.Duration } -// DurationGetHours returns the duration in hours. -func DurationGetHours(val ref.Val) ref.Val { - dur, ok := val.(Duration) - if !ok { - return MaybeNoSuchOverloadErr(val) - } - return Int(dur.Hours()) -} - -// DurationGetMinutes returns duration in minutes. -func DurationGetMinutes(val ref.Val) ref.Val { - dur, ok := val.(Duration) - if !ok { - return MaybeNoSuchOverloadErr(val) - } - return Int(dur.Minutes()) -} - -// DurationGetSeconds returns duration in seconds. -func DurationGetSeconds(val ref.Val) ref.Val { - dur, ok := val.(Duration) - if !ok { - return MaybeNoSuchOverloadErr(val) - } - return Int(dur.Seconds()) -} +var ( + durationValueType = reflect.TypeOf(&dpb.Duration{}) -// DurationGetMilliseconds returns duration in milliseconds. 
-func DurationGetMilliseconds(val ref.Val) ref.Val { - dur, ok := val.(Duration) - if !ok { - return MaybeNoSuchOverloadErr(val) - } - return Int(dur.Milliseconds()) -} + durationZeroArgOverloads = map[string]func(time.Duration) ref.Val{ + overloads.TimeGetHours: func(dur time.Duration) ref.Val { + return Int(dur.Hours()) + }, + overloads.TimeGetMinutes: func(dur time.Duration) ref.Val { + return Int(dur.Minutes()) + }, + overloads.TimeGetSeconds: func(dur time.Duration) ref.Val { + return Int(dur.Seconds()) + }, + overloads.TimeGetMilliseconds: func(dur time.Duration) ref.Val { + return Int(dur.Milliseconds()) + }} +) diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go index aa8f94b4f85..b4874d9d4d1 100644 --- a/vendor/github.com/google/cel-go/common/types/err.go +++ b/vendor/github.com/google/cel-go/common/types/err.go @@ -35,7 +35,7 @@ type Err struct { var ( // ErrType singleton. - ErrType = NewOpaqueType("error") + ErrType = NewTypeValue("error") // errDivideByZero is an error indicating a division by zero of an integer value. errDivideByZero = errors.New("division by zero") @@ -129,11 +129,6 @@ func (e *Err) Is(target error) bool { return e.error.Error() == target.Error() } -// Unwrap implements errors.Unwrap. -func (e *Err) Unwrap() error { - return e.error -} - // IsError returns whether the input element ref.Type or ref.Val is equal to // the ErrType singleton. func IsError(val ref.Val) bool { diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go index 940772aed1f..f5a9511c8d1 100644 --- a/vendor/github.com/google/cel-go/common/types/int.go +++ b/vendor/github.com/google/cel-go/common/types/int.go @@ -22,6 +22,7 @@ import ( "time" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -40,6 +41,16 @@ const ( ) var ( + // IntType singleton. + IntType = NewTypeValue("int", + traits.AdderType, + traits.ComparerType, + traits.DividerType, + traits.ModderType, + traits.MultiplierType, + traits.NegatorType, + traits.SubtractorType) + // int32WrapperType reflected type for protobuf int32 wrapper type. int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{}) diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go index 98e9147b6ee..9f224ad4ffa 100644 --- a/vendor/github.com/google/cel-go/common/types/iterator.go +++ b/vendor/github.com/google/cel-go/common/types/iterator.go @@ -24,7 +24,7 @@ import ( var ( // IteratorType singleton. - IteratorType = NewObjectType("iterator", traits.IteratorType) + IteratorType = NewTypeValue("iterator", traits.IteratorType) ) // baseIterator is the basis for list, map, and object iterators. diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go index d4932b4a90c..de5f2099bf7 100644 --- a/vendor/github.com/google/cel-go/common/types/list.go +++ b/vendor/github.com/google/cel-go/common/types/list.go @@ -29,15 +29,25 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" ) +var ( + // ListType singleton. + ListType = NewTypeValue("list", + traits.AdderType, + traits.ContainerType, + traits.IndexerType, + traits.IterableType, + traits.SizerType) +) + // NewDynamicList returns a traits.Lister with heterogenous elements. 
// value should be an array of "native" types, i.e. any type that // NativeToValue() can convert to a ref.Val. -func NewDynamicList(adapter Adapter, value any) traits.Lister { +func NewDynamicList(adapter ref.TypeAdapter, value any) traits.Lister { refValue := reflect.ValueOf(value) return &baseList{ - Adapter: adapter, - value: value, - size: refValue.Len(), + TypeAdapter: adapter, + value: value, + size: refValue.Len(), get: func(i int) any { return refValue.Index(i).Interface() }, @@ -45,56 +55,56 @@ func NewDynamicList(adapter Adapter, value any) traits.Lister { } // NewStringList returns a traits.Lister containing only strings. -func NewStringList(adapter Adapter, elems []string) traits.Lister { +func NewStringList(adapter ref.TypeAdapter, elems []string) traits.Lister { return &baseList{ - Adapter: adapter, - value: elems, - size: len(elems), - get: func(i int) any { return elems[i] }, + TypeAdapter: adapter, + value: elems, + size: len(elems), + get: func(i int) any { return elems[i] }, } } // NewRefValList returns a traits.Lister with ref.Val elements. // // This type specialization is used with list literals within CEL expressions. -func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister { +func NewRefValList(adapter ref.TypeAdapter, elems []ref.Val) traits.Lister { return &baseList{ - Adapter: adapter, - value: elems, - size: len(elems), - get: func(i int) any { return elems[i] }, + TypeAdapter: adapter, + value: elems, + size: len(elems), + get: func(i int) any { return elems[i] }, } } // NewProtoList returns a traits.Lister based on a pb.List instance. -func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister { +func NewProtoList(adapter ref.TypeAdapter, list protoreflect.List) traits.Lister { return &baseList{ - Adapter: adapter, - value: list, - size: list.Len(), - get: func(i int) any { return list.Get(i).Interface() }, + TypeAdapter: adapter, + value: list, + size: list.Len(), + get: func(i int) any { return list.Get(i).Interface() }, } } // NewJSONList returns a traits.Lister based on structpb.ListValue instance. -func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister { +func NewJSONList(adapter ref.TypeAdapter, l *structpb.ListValue) traits.Lister { vals := l.GetValues() return &baseList{ - Adapter: adapter, - value: l, - size: len(vals), - get: func(i int) any { return vals[i] }, + TypeAdapter: adapter, + value: l, + size: len(vals), + get: func(i int) any { return vals[i] }, } } // NewMutableList creates a new mutable list whose internal state can be modified. -func NewMutableList(adapter Adapter) traits.MutableLister { +func NewMutableList(adapter ref.TypeAdapter) traits.MutableLister { var mutableValues []ref.Val l := &mutableList{ baseList: &baseList{ - Adapter: adapter, - value: mutableValues, - size: 0, + TypeAdapter: adapter, + value: mutableValues, + size: 0, }, mutableValues: mutableValues, } @@ -106,9 +116,9 @@ func NewMutableList(adapter Adapter) traits.MutableLister { // baseList points to a list containing elements of any type. // The `value` is an array of native values, and refValue is its reflection object. -// The `Adapter` enables native type to CEL type conversions. +// The `ref.TypeAdapter` enables native type to CEL type conversions. type baseList struct { - Adapter + ref.TypeAdapter value any // size indicates the number of elements within the list. 
@@ -133,9 +143,9 @@ func (l *baseList) Add(other ref.Val) ref.Val { return l } return &concatList{ - Adapter: l.Adapter, - prevList: l, - nextList: otherList} + TypeAdapter: l.TypeAdapter, + prevList: l, + nextList: otherList} } // Contains implements the traits.Container interface method. @@ -312,13 +322,13 @@ func (l *mutableList) Add(other ref.Val) ref.Val { func (l *mutableList) ToImmutableList() traits.Lister { // The reference to internal state is guaranteed to be safe as this call is only performed // when mutations have been completed. - return NewRefValList(l.Adapter, l.mutableValues) + return NewRefValList(l.TypeAdapter, l.mutableValues) } // concatList combines two list implementations together into a view. -// The `Adapter` enables native type to CEL type conversions. +// The `ref.TypeAdapter` enables native type to CEL type conversions. type concatList struct { - Adapter + ref.TypeAdapter value any prevList traits.Lister nextList traits.Lister @@ -337,9 +347,9 @@ func (l *concatList) Add(other ref.Val) ref.Val { return l } return &concatList{ - Adapter: l.Adapter, - prevList: l, - nextList: otherList} + TypeAdapter: l.TypeAdapter, + prevList: l, + nextList: otherList} } // Contains implements the traits.Container interface method. @@ -366,7 +376,7 @@ func (l *concatList) Contains(elem ref.Val) ref.Val { // ConvertToNative implements the ref.Val interface method. func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) { - combined := NewDynamicList(l.Adapter, l.Value().([]any)) + combined := NewDynamicList(l.TypeAdapter, l.Value().([]any)) return combined.ConvertToNative(typeDesc) } diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go index 739b7aab00d..213be4ac9e3 100644 --- a/vendor/github.com/google/cel-go/common/types/map.go +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -32,10 +32,10 @@ import ( ) // NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. -func NewDynamicMap(adapter Adapter, value any) traits.Mapper { +func NewDynamicMap(adapter ref.TypeAdapter, value any) traits.Mapper { refValue := reflect.ValueOf(value) return &baseMap{ - Adapter: adapter, + TypeAdapter: adapter, mapAccessor: newReflectMapAccessor(adapter, refValue), value: value, size: refValue.Len(), @@ -46,10 +46,10 @@ func NewDynamicMap(adapter Adapter, value any) traits.Mapper { // encoded in protocol buffer form. // // The `adapter` argument provides type adaptation capabilities from proto to CEL. -func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper { +func NewJSONStruct(adapter ref.TypeAdapter, value *structpb.Struct) traits.Mapper { fields := value.GetFields() return &baseMap{ - Adapter: adapter, + TypeAdapter: adapter, mapAccessor: newJSONStructAccessor(adapter, fields), value: value, size: len(fields), @@ -57,9 +57,9 @@ func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper { } // NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values. 
-func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper { +func NewRefValMap(adapter ref.TypeAdapter, value map[ref.Val]ref.Val) traits.Mapper { return &baseMap{ - Adapter: adapter, + TypeAdapter: adapter, mapAccessor: newRefValMapAccessor(value), value: value, size: len(value), @@ -67,9 +67,9 @@ func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper { } // NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values. -func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper { +func NewStringInterfaceMap(adapter ref.TypeAdapter, value map[string]any) traits.Mapper { return &baseMap{ - Adapter: adapter, + TypeAdapter: adapter, mapAccessor: newStringIfaceMapAccessor(adapter, value), value: value, size: len(value), @@ -77,9 +77,9 @@ func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper } // NewStringStringMap returns a specialized traits.Mapper with string keys and values. -func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper { +func NewStringStringMap(adapter ref.TypeAdapter, value map[string]string) traits.Mapper { return &baseMap{ - Adapter: adapter, + TypeAdapter: adapter, mapAccessor: newStringMapAccessor(value), value: value, size: len(value), @@ -87,13 +87,22 @@ func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper } // NewProtoMap returns a specialized traits.Mapper for handling protobuf map values. -func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { +func NewProtoMap(adapter ref.TypeAdapter, value *pb.Map) traits.Mapper { return &protoMap{ - Adapter: adapter, - value: value, + TypeAdapter: adapter, + value: value, } } +var ( + // MapType singleton. + MapType = NewTypeValue("map", + traits.ContainerType, + traits.IndexerType, + traits.IterableType, + traits.SizerType) +) + // mapAccessor is a private interface for finding values within a map and iterating over the keys. // This interface implements portions of the API surface area required by the traits.Mapper // interface. @@ -112,7 +121,7 @@ type mapAccessor interface { // Since CEL is side-effect free, the base map represents an immutable object. type baseMap struct { // TypeAdapter used to convert keys and values accessed within the map. - Adapter + ref.TypeAdapter // mapAccessor interface implementation used to find and iterate over map keys. 
mapAccessor @@ -307,15 +316,15 @@ func (m *baseMap) Value() any { return m.value } -func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { +func newJSONStructAccessor(adapter ref.TypeAdapter, st map[string]*structpb.Value) mapAccessor { return &jsonStructAccessor{ - Adapter: adapter, - st: st, + TypeAdapter: adapter, + st: st, } } type jsonStructAccessor struct { - Adapter + ref.TypeAdapter st map[string]*structpb.Value } @@ -350,17 +359,17 @@ func (a *jsonStructAccessor) Iterator() traits.Iterator { } } -func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { +func newReflectMapAccessor(adapter ref.TypeAdapter, value reflect.Value) mapAccessor { keyType := value.Type().Key() return &reflectMapAccessor{ - Adapter: adapter, - refValue: value, - keyType: keyType, + TypeAdapter: adapter, + refValue: value, + keyType: keyType, } } type reflectMapAccessor struct { - Adapter + ref.TypeAdapter refValue reflect.Value keyType reflect.Type } @@ -418,9 +427,9 @@ func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) { // Iterator creates a Golang reflection based traits.Iterator. func (m *reflectMapAccessor) Iterator() traits.Iterator { return &mapIterator{ - Adapter: m.Adapter, - mapKeys: m.refValue.MapRange(), - len: m.refValue.Len(), + TypeAdapter: m.TypeAdapter, + mapKeys: m.refValue.MapRange(), + len: m.refValue.Len(), } } @@ -471,9 +480,9 @@ func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) { // Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. func (a *refValMapAccessor) Iterator() traits.Iterator { return &mapIterator{ - Adapter: DefaultTypeAdapter, - mapKeys: reflect.ValueOf(a.mapVal).MapRange(), - len: len(a.mapVal), + TypeAdapter: DefaultTypeAdapter, + mapKeys: reflect.ValueOf(a.mapVal).MapRange(), + len: len(a.mapVal), } } @@ -515,15 +524,15 @@ func (a *stringMapAccessor) Iterator() traits.Iterator { } } -func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { +func newStringIfaceMapAccessor(adapter ref.TypeAdapter, mapVal map[string]any) mapAccessor { return &stringIfaceMapAccessor{ - Adapter: adapter, - mapVal: mapVal, + TypeAdapter: adapter, + mapVal: mapVal, } } type stringIfaceMapAccessor struct { - Adapter + ref.TypeAdapter mapVal map[string]any } @@ -560,7 +569,7 @@ func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { // protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to // accessing protoreflect.Map values. 
type protoMap struct { - Adapter + ref.TypeAdapter value *pb.Map } @@ -763,9 +772,9 @@ func (m *protoMap) Iterator() traits.Iterator { return true }) return &protoMapIterator{ - Adapter: m.Adapter, - mapKeys: mapKeys, - len: m.value.Len(), + TypeAdapter: m.TypeAdapter, + mapKeys: mapKeys, + len: m.value.Len(), } } @@ -786,7 +795,7 @@ func (m *protoMap) Value() any { type mapIterator struct { *baseIterator - Adapter + ref.TypeAdapter mapKeys *reflect.MapIter cursor int len int @@ -809,7 +818,7 @@ func (it *mapIterator) Next() ref.Val { type protoMapIterator struct { *baseIterator - Adapter + ref.TypeAdapter mapKeys []protoreflect.MapKey cursor int len int diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go index 926ca3dc980..38927a112cd 100644 --- a/vendor/github.com/google/cel-go/common/types/null.go +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -30,6 +30,8 @@ import ( type Null structpb.NullValue var ( + // NullType singleton. + NullType = NewTypeValue("null_type") // NullValue singleton. NullValue = Null(structpb.NullValue_NULL_VALUE) diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go index 8ba0af9fbe1..9955e2dce59 100644 --- a/vendor/github.com/google/cel-go/common/types/object.go +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -29,10 +29,10 @@ import ( ) type protoObj struct { - Adapter + ref.TypeAdapter value proto.Message typeDesc *pb.TypeDescription - typeValue ref.Val + typeValue *TypeValue } // NewObject returns an object based on a proto.Message value which handles @@ -42,15 +42,15 @@ type protoObj struct { // Note: the type value is pulled from the list of registered types within the // type provider. If the proto type is not registered within the type provider, // then this will result in an error within the type adapter / provider. -func NewObject(adapter Adapter, +func NewObject(adapter ref.TypeAdapter, typeDesc *pb.TypeDescription, - typeValue ref.Val, + typeValue *TypeValue, value proto.Message) ref.Val { return &protoObj{ - Adapter: adapter, - value: value, - typeDesc: typeDesc, - typeValue: typeValue} + TypeAdapter: adapter, + value: value, + typeDesc: typeDesc, + typeValue: typeValue} } func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) { @@ -157,7 +157,7 @@ func (o *protoObj) Get(index ref.Val) ref.Val { } func (o *protoObj) Type() ref.Type { - return o.typeValue.(ref.Type) + return o.typeValue } func (o *protoObj) Value() any { diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go index a9f30aed015..54cb35b1ab9 100644 --- a/vendor/github.com/google/cel-go/common/types/optional.go +++ b/vendor/github.com/google/cel-go/common/types/optional.go @@ -24,7 +24,7 @@ import ( var ( // OptionalType indicates the runtime type of an optional value. - OptionalType = NewOpaqueType("optional") + OptionalType = NewTypeValue("optional") // OptionalNone is a sentinel value which is used to indicate an empty optional value. 
OptionalNone = &Optional{} diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go index 6cc95c276dd..df9532156a0 100644 --- a/vendor/github.com/google/cel-go/common/types/pb/type.go +++ b/vendor/github.com/google/cel-go/common/types/pb/type.go @@ -285,7 +285,7 @@ func (fd *FieldDescription) GetFrom(target any) (any, error) { // IsEnum returns true if the field type refers to an enum value. func (fd *FieldDescription) IsEnum() bool { - return fd.ProtoKind() == protoreflect.EnumKind + return fd.desc.Kind() == protoreflect.EnumKind } // IsMap returns true if the field is of map type. @@ -295,7 +295,7 @@ func (fd *FieldDescription) IsMap() bool { // IsMessage returns true if the field is of message type. func (fd *FieldDescription) IsMessage() bool { - kind := fd.ProtoKind() + kind := fd.desc.Kind() return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind } @@ -326,11 +326,6 @@ func (fd *FieldDescription) Name() string { return string(fd.desc.Name()) } -// ProtoKind returns the protobuf reflected kind of the field. -func (fd *FieldDescription) ProtoKind() protoreflect.Kind { - return fd.desc.Kind() -} - // ReflectType returns the Golang reflect.Type for this field. func (fd *FieldDescription) ReflectType() reflect.Type { return fd.reflectType @@ -350,17 +345,17 @@ func (fd *FieldDescription) Zero() proto.Message { } func (fd *FieldDescription) typeDefToType() *exprpb.Type { - if fd.IsMessage() { + if fd.desc.Kind() == protoreflect.MessageKind || fd.desc.Kind() == protoreflect.GroupKind { msgType := string(fd.desc.Message().FullName()) if wk, found := CheckedWellKnowns[msgType]; found { return wk } return checkedMessageType(msgType) } - if fd.IsEnum() { + if fd.desc.Kind() == protoreflect.EnumKind { return checkedInt } - return CheckedPrimitives[fd.ProtoKind()] + return CheckedPrimitives[fd.desc.Kind()] } // Map wraps the protoreflect.Map object with a key and value FieldDescription for use in @@ -468,13 +463,13 @@ func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, er unwrappedAny := &anypb.Any{} err := Merge(unwrappedAny, msg) if err != nil { - return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err) + return nil, false, err } dynMsg, err := unwrappedAny.UnmarshalNew() if err != nil { // Allow the error to move further up the stack as it should result in an type // conversion error if the caller does not recover it somehow. - return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err) + return nil, false, err } // Attempt to unwrap the dynamic type, otherwise return the dynamic message. 
unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect()) @@ -565,10 +560,8 @@ func zeroValueOf(msg proto.Message) proto.Message { } var ( - jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value" - zeroValueMap = map[string]proto.Message{ - "google.protobuf.Any": &anypb.Any{TypeUrl: jsonValueTypeURL}, + "google.protobuf.Any": &anypb.Any{}, "google.protobuf.Duration": &dpb.Duration{}, "google.protobuf.ListValue": &structpb.ListValue{}, "google.protobuf.Struct": &structpb.Struct{}, diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go index e80b4622e24..e66951f5b24 100644 --- a/vendor/github.com/google/cel-go/common/types/provider.go +++ b/vendor/github.com/google/cel-go/common/types/provider.go @@ -33,64 +33,17 @@ import ( tpb "google.golang.org/protobuf/types/known/timestamppb" ) -// Adapter converts native Go values of varying type and complexity to equivalent CEL values. -type Adapter = ref.TypeAdapter - -// Provider specifies functions for creating new object instances and for resolving -// enum values by name. -type Provider interface { - // EnumValue returns the numeric value of the given enum value name. - EnumValue(enumName string) ref.Val - - // FindIdent takes a qualified identifier name and returns a ref.Val if one exists. - FindIdent(identName string) (ref.Val, bool) - - // FindStructType returns the Type give a qualified type name. - // - // For historical reasons, only struct types are expected to be returned through this - // method, and the type values are expected to be wrapped in a TypeType instance using - // TypeTypeWithParam(). - // - // Returns false if not found. - FindStructType(structType string) (*Type, bool) - - // FieldStructFieldType returns the field type for a checked type value. Returns - // false if the field could not be found. - FindStructFieldType(structType, fieldName string) (*FieldType, bool) - - // NewValue creates a new type value from a qualified name and map of field - // name to value. - // - // Note, for each value, the Val.ConvertToNative function will be invoked - // to convert the Val to the field's native type. If an error occurs during - // conversion, the NewValue will be a types.Err. - NewValue(structType string, fields map[string]ref.Val) ref.Val -} - -// FieldType represents a field's type value and whether that field supports presence detection. -type FieldType struct { - // Type of the field as a CEL native type value. - Type *Type - - // IsSet indicates whether the field is set on an input object. - IsSet ref.FieldTester - - // GetFrom retrieves the field value on the input object, if set. - GetFrom ref.FieldGetter -} - -// Registry provides type information for a set of registered types. -type Registry struct { - revTypeMap map[string]*Type +type protoTypeRegistry struct { + revTypeMap map[string]ref.Type pbdb *pb.Db } // NewRegistry accepts a list of proto message instances and returns a type // provider which can create new instances of the provided message or any // message that proto depends upon in its FileDescriptor. 
-func NewRegistry(types ...proto.Message) (*Registry, error) { - p := &Registry{ - revTypeMap: make(map[string]*Type), +func NewRegistry(types ...proto.Message) (ref.TypeRegistry, error) { + p := &protoTypeRegistry{ + revTypeMap: make(map[string]ref.Type), pbdb: pb.NewDb(), } err := p.RegisterType( @@ -126,17 +79,18 @@ func NewRegistry(types ...proto.Message) (*Registry, error) { } // NewEmptyRegistry returns a registry which is completely unconfigured. -func NewEmptyRegistry() *Registry { - return &Registry{ - revTypeMap: make(map[string]*Type), +func NewEmptyRegistry() ref.TypeRegistry { + return &protoTypeRegistry{ + revTypeMap: make(map[string]ref.Type), pbdb: pb.NewDb(), } } -// Copy copies the current state of the registry into its own memory space. -func (p *Registry) Copy() *Registry { - copy := &Registry{ - revTypeMap: make(map[string]*Type), +// Copy implements the ref.TypeRegistry interface method which copies the current state of the +// registry into its own memory space. +func (p *protoTypeRegistry) Copy() ref.TypeRegistry { + copy := &protoTypeRegistry{ + revTypeMap: make(map[string]ref.Type), pbdb: p.pbdb.Copy(), } for k, v := range p.revTypeMap { @@ -145,8 +99,7 @@ func (p *Registry) Copy() *Registry { return copy } -// EnumValue returns the numeric value of the given enum value name. -func (p *Registry) EnumValue(enumName string) ref.Val { +func (p *protoTypeRegistry) EnumValue(enumName string) ref.Val { enumVal, found := p.pbdb.DescribeEnum(enumName) if !found { return NewErr("unknown enum name '%s'", enumName) @@ -154,12 +107,9 @@ func (p *Registry) EnumValue(enumName string) ref.Val { return Int(enumVal.Value()) } -// FieldFieldType returns the field type for a checked type value. Returns false if -// the field could not be found. -// -// Deprecated: use FindStructFieldType -func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) { - msgType, found := p.pbdb.DescribeType(structType) +func (p *protoTypeRegistry) FindFieldType(messageType string, + fieldName string) (*ref.FieldType, bool) { + msgType, found := p.pbdb.DescribeType(messageType) if !found { return nil, false } @@ -168,32 +118,15 @@ func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, return nil, false } return &ref.FieldType{ - Type: field.CheckedType(), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, true -} - -// FieldStructFieldType returns the field type for a checked type value. Returns -// false if the field could not be found. -func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) { - msgType, found := p.pbdb.DescribeType(structType) - if !found { - return nil, false - } - field, found := msgType.FieldByName(fieldName) - if !found { - return nil, false - } - return &FieldType{ - Type: fieldDescToCELType(field), - IsSet: field.IsSet, - GetFrom: field.GetFrom}, true + Type: field.CheckedType(), + IsSet: field.IsSet, + GetFrom: field.GetFrom}, + true } -// FindIdent takes a qualified identifier name and returns a ref.Val if one exists. 
-func (p *Registry) FindIdent(identName string) (ref.Val, bool) { +func (p *protoTypeRegistry) FindIdent(identName string) (ref.Val, bool) { if t, found := p.revTypeMap[identName]; found { - return t, true + return t.(ref.Val), true } if enumVal, found := p.pbdb.DescribeEnum(identName); found { return Int(enumVal.Value()), true @@ -201,50 +134,24 @@ func (p *Registry) FindIdent(identName string) (ref.Val, bool) { return nil, false } -// FindType looks up the Type given a qualified typeName. Returns false if not found. -// -// Deprecated: use FindStructType -func (p *Registry) FindType(structType string) (*exprpb.Type, bool) { - if _, found := p.pbdb.DescribeType(structType); !found { +func (p *protoTypeRegistry) FindType(typeName string) (*exprpb.Type, bool) { + if _, found := p.pbdb.DescribeType(typeName); !found { return nil, false } - if structType != "" && structType[0] == '.' { - structType = structType[1:] + if typeName != "" && typeName[0] == '.' { + typeName = typeName[1:] } return &exprpb.Type{ TypeKind: &exprpb.Type_Type{ Type: &exprpb.Type{ TypeKind: &exprpb.Type_MessageType{ - MessageType: structType}}}}, true -} - -// FindStructType returns the Type give a qualified type name. -// -// For historical reasons, only struct types are expected to be returned through this -// method, and the type values are expected to be wrapped in a TypeType instance using -// TypeTypeWithParam(). -// -// Returns false if not found. -func (p *Registry) FindStructType(structType string) (*Type, bool) { - if _, found := p.pbdb.DescribeType(structType); !found { - return nil, false - } - if structType != "" && structType[0] == '.' { - structType = structType[1:] - } - return NewTypeTypeWithParam(NewObjectType(structType)), true + MessageType: typeName}}}}, true } -// NewValue creates a new type value from a qualified name and map of field -// name to value. -// -// Note, for each value, the Val.ConvertToNative function will be invoked -// to convert the Val to the field's native type. If an error occurs during -// conversion, the NewValue will be a types.Err. -func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val { - td, found := p.pbdb.DescribeType(structType) +func (p *protoTypeRegistry) NewValue(typeName string, fields map[string]ref.Val) ref.Val { + td, found := p.pbdb.DescribeType(typeName) if !found { - return NewErr("unknown type '%s'", structType) + return NewErr("unknown type '%s'", typeName) } msg := td.New() fieldMap := td.FieldMap() @@ -261,8 +168,7 @@ func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Va return p.NativeToValue(msg.Interface()) } -// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`. -func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { +func (p *protoTypeRegistry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error { fd, err := p.pbdb.RegisterDescriptor(fileDesc) if err != nil { return err @@ -270,8 +176,7 @@ func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) erro return p.registerAllTypes(fd) } -// RegisterMessage registers a protocol buffer message and its dependencies. 
-func (p *Registry) RegisterMessage(message proto.Message) error { +func (p *protoTypeRegistry) RegisterMessage(message proto.Message) error { fd, err := p.pbdb.RegisterMessage(message) if err != nil { return err @@ -279,32 +184,11 @@ func (p *Registry) RegisterMessage(message proto.Message) error { return p.registerAllTypes(fd) } -// RegisterType registers a type value with the provider which ensures the provider is aware of how to -// map the type to an identifier. -// -// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name. -// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects the -// traits present on the input and the runtime type name. By default this foreign type will be treated -// as a types.StructKind. To avoid potential issues where the `ref.Type` values does not match the -// generated `*types.Type` instance, consider always using the `*types.Type` to represent type extensions -// to CEL, even when they're not based on protobuf types. -func (p *Registry) RegisterType(types ...ref.Type) error { +func (p *protoTypeRegistry) RegisterType(types ...ref.Type) error { for _, t := range types { - celType := maybeForeignType(t) - existing, found := p.revTypeMap[t.TypeName()] - if !found { - p.revTypeMap[t.TypeName()] = celType - continue - } - if !existing.IsEquivalentType(celType) { - return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType) - } - if existing.traitMask != celType.traitMask { - return fmt.Errorf( - "type registered with conflicting traits: %v with traits %v, input: %v", - existing.TypeName(), existing.traitMask, celType.traitMask) - } + p.revTypeMap[t.TypeName()] = t } + // TODO: generate an error when the type name is registered more than once. return nil } @@ -312,7 +196,7 @@ func (p *Registry) RegisterType(types ...ref.Type) error { // providing support for custom proto-based types. // // This method should be the inverse of ref.Val.ConvertToNative. -func (p *Registry) NativeToValue(value any) ref.Val { +func (p *protoTypeRegistry) NativeToValue(value any) ref.Val { if val, found := nativeToValue(p, value); found { return val } @@ -334,7 +218,7 @@ func (p *Registry) NativeToValue(value any) ref.Val { if !found { return NewErr("unknown type: '%s'", typeName) } - return NewObject(p, td, typeVal, v) + return NewObject(p, td, typeVal.(*TypeValue), v) case *pb.Map: return NewProtoMap(p, v) case protoreflect.List: @@ -347,13 +231,8 @@ func (p *Registry) NativeToValue(value any) ref.Val { return UnsupportedRefValConversionErr(value) } -func (p *Registry) registerAllTypes(fd *pb.FileDescription) error { +func (p *protoTypeRegistry) registerAllTypes(fd *pb.FileDescription) error { for _, typeName := range fd.GetTypeNames() { - // skip well-known type names since they're automatically sanitized - // during NewObjectType() calls. 
- if _, found := checkedWellKnowns[typeName]; found { - continue - } err := p.RegisterType(NewObjectTypeValue(typeName)) if err != nil { return err @@ -362,28 +241,6 @@ func (p *Registry) registerAllTypes(fd *pb.FileDescription) error { return nil } -func fieldDescToCELType(field *pb.FieldDescription) *Type { - if field.IsMap() { - return NewMapType( - singularFieldDescToCELType(field.KeyType), - singularFieldDescToCELType(field.ValueType)) - } - if field.IsList() { - return NewListType(singularFieldDescToCELType(field)) - } - return singularFieldDescToCELType(field) -} - -func singularFieldDescToCELType(field *pb.FieldDescription) *Type { - if field.IsMessage() { - return NewObjectType(string(field.Descriptor().Message().FullName())) - } - if field.IsEnum() { - return IntType - } - return ProtoCELPrimitives[field.ProtoKind()] -} - // defaultTypeAdapter converts go native types to CEL values. type defaultTypeAdapter struct{} @@ -402,7 +259,7 @@ func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val { // nativeToValue returns the converted (ref.Val, true) of a conversion is found, // otherwise (nil, false) -func nativeToValue(a Adapter, value any) (ref.Val, bool) { +func nativeToValue(a ref.TypeAdapter, value any) (ref.Val, bool) { switch v := value.(type) { case nil: return NullValue, true @@ -690,24 +547,3 @@ func fieldTypeConversionError(field *pb.FieldDescription, err error) error { msgName := field.Descriptor().ContainingMessage().FullName() return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) } - -var ( - // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type. - ProtoCELPrimitives = map[protoreflect.Kind]*Type{ - protoreflect.BoolKind: BoolType, - protoreflect.BytesKind: BytesType, - protoreflect.DoubleKind: DoubleType, - protoreflect.FloatKind: DoubleType, - protoreflect.Int32Kind: IntType, - protoreflect.Int64Kind: IntType, - protoreflect.Sint32Kind: IntType, - protoreflect.Sint64Kind: IntType, - protoreflect.Uint32Kind: UintType, - protoreflect.Uint64Kind: UintType, - protoreflect.Fixed32Kind: UintType, - protoreflect.Fixed64Kind: UintType, - protoreflect.Sfixed32Kind: IntType, - protoreflect.Sfixed64Kind: IntType, - protoreflect.StringKind: StringType, - } -) diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go index b9820023d6f..7eabbb9ca38 100644 --- a/vendor/github.com/google/cel-go/common/types/ref/provider.go +++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go @@ -23,34 +23,34 @@ import ( // TypeProvider specifies functions for creating new object instances and for // resolving enum values by name. -// -// Deprecated: use types.Provider type TypeProvider interface { // EnumValue returns the numeric value of the given enum value name. EnumValue(enumName string) Val - // FindIdent takes a qualified identifier name and returns a Value if one exists. + // FindIdent takes a qualified identifier name and returns a Value if one + // exists. FindIdent(identName string) (Val, bool) - // FindType looks up the Type given a qualified typeName. Returns false if not found. + // FindType looks up the Type given a qualified typeName. Returns false + // if not found. + // + // Used during type-checking only. FindType(typeName string) (*exprpb.Type, bool) - // FieldFieldType returns the field type for a checked type value. Returns false if - // the field could not be found. 
- FindFieldType(messageType, fieldName string) (*FieldType, bool) + // FieldFieldType returns the field type for a checked type value. Returns + // false if the field could not be found. + FindFieldType(messageType string, fieldName string) (*FieldType, bool) - // NewValue creates a new type value from a qualified name and map of field name - // to value. + // NewValue creates a new type value from a qualified name and map of field + // name to value. // - // Note, for each value, the Val.ConvertToNative function will be invoked to convert - // the Val to the field's native type. If an error occurs during conversion, the - // NewValue will be a types.Err. + // Note, for each value, the Val.ConvertToNative function will be invoked + // to convert the Val to the field's native type. If an error occurs during + // conversion, the NewValue will be a types.Err. NewValue(typeName string, fields map[string]Val) Val } // TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values. -// -// Deprecated: use types.Adapter type TypeAdapter interface { // NativeToValue converts the input `value` to a CEL `ref.Val`. NativeToValue(value any) Val @@ -60,8 +60,6 @@ type TypeAdapter interface { // implementations support type-customization, so these features are optional. However, a // `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types // which are registered can be converted to CEL representations. -// -// Deprecated: use types.Registry type TypeRegistry interface { TypeAdapter TypeProvider @@ -78,14 +76,15 @@ type TypeRegistry interface { // If a type is provided more than once with an alternative definition, the // call will result in an error. RegisterType(types ...Type) error + + // Copy the TypeRegistry and return a new registry whose mutable state is isolated. + Copy() TypeRegistry } // FieldType represents a field's type value and whether that field supports // presence detection. -// -// Deprecated: use types.FieldType type FieldType struct { - // Type of the field as a protobuf type value. + // Type of the field. Type *exprpb.Type // IsSet indicates whether the field is set on an input object. diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go index 028e6824d22..a65cc14e4c5 100644 --- a/vendor/github.com/google/cel-go/common/types/string.go +++ b/vendor/github.com/google/cel-go/common/types/string.go @@ -24,6 +24,7 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -35,10 +36,18 @@ import ( type String string var ( - stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{ - overloads.Contains: StringContains, - overloads.EndsWith: StringEndsWith, - overloads.StartsWith: StringStartsWith, + // StringType singleton. + StringType = NewTypeValue("string", + traits.AdderType, + traits.ComparerType, + traits.MatcherType, + traits.ReceiverType, + traits.SizerType) + + stringOneArgOverloads = map[string]func(String, ref.Val) ref.Val{ + overloads.Contains: stringContains, + overloads.EndsWith: stringEndsWith, + overloads.StartsWith: stringStartsWith, } stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) @@ -189,41 +198,26 @@ func (s String) Value() any { return string(s) } -// StringContains returns whether the string contains a substring. 
-func StringContains(s, sub ref.Val) ref.Val { - str, ok := s.(String) - if !ok { - return MaybeNoSuchOverloadErr(s) - } +func stringContains(s String, sub ref.Val) ref.Val { subStr, ok := sub.(String) if !ok { return MaybeNoSuchOverloadErr(sub) } - return Bool(strings.Contains(string(str), string(subStr))) + return Bool(strings.Contains(string(s), string(subStr))) } -// StringEndsWith returns whether the target string contains the input suffix. -func StringEndsWith(s, suf ref.Val) ref.Val { - str, ok := s.(String) - if !ok { - return MaybeNoSuchOverloadErr(s) - } +func stringEndsWith(s String, suf ref.Val) ref.Val { sufStr, ok := suf.(String) if !ok { return MaybeNoSuchOverloadErr(suf) } - return Bool(strings.HasSuffix(string(str), string(sufStr))) + return Bool(strings.HasSuffix(string(s), string(sufStr))) } -// StringStartsWith returns whether the target string contains the input prefix. -func StringStartsWith(s, pre ref.Val) ref.Val { - str, ok := s.(String) - if !ok { - return MaybeNoSuchOverloadErr(s) - } +func stringStartsWith(s String, pre ref.Val) ref.Val { preStr, ok := pre.(String) if !ok { return MaybeNoSuchOverloadErr(pre) } - return Bool(strings.HasPrefix(string(str), string(preStr))) + return Bool(strings.HasPrefix(string(s), string(preStr))) } diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go index 33acdea8ef7..c784f2e54be 100644 --- a/vendor/github.com/google/cel-go/common/types/timestamp.go +++ b/vendor/github.com/google/cel-go/common/types/timestamp.go @@ -23,6 +23,7 @@ import ( "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -52,6 +53,15 @@ const ( maxUnixTime int64 = 253402300799 ) +var ( + // TimestampType singleton. + TimestampType = NewTypeValue("google.protobuf.Timestamp", + traits.AdderType, + traits.ComparerType, + traits.ReceiverType, + traits.SubtractorType) +) + // Add implements traits.Adder.Add. func (t Timestamp) Add(other ref.Val) ref.Val { switch other.Type() { diff --git a/vendor/github.com/google/cel-go/common/types/type.go b/vendor/github.com/google/cel-go/common/types/type.go new file mode 100644 index 00000000000..164a4605033 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/type.go @@ -0,0 +1,102 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +var ( + // TypeType is the type of a TypeValue. + TypeType = NewTypeValue("type") +) + +// TypeValue is an instance of a Value that describes a value's type. +type TypeValue struct { + name string + traitMask int +} + +// NewTypeValue returns *TypeValue which is both a ref.Type and ref.Val. 
+func NewTypeValue(name string, traits ...int) *TypeValue { + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &TypeValue{ + name: name, + traitMask: traitMask} +} + +// NewObjectTypeValue returns a *TypeValue based on the input name, which is +// annotated with the traits relevant to all objects. +func NewObjectTypeValue(name string) *TypeValue { + return NewTypeValue(name, + traits.FieldTesterType, + traits.IndexerType) +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t *TypeValue) ConvertToNative(typeDesc reflect.Type) (any, error) { + // TODO: replace the internal type representation with a proto-value. + return nil, fmt.Errorf("type conversion not supported for 'type'") +} + +// ConvertToType implements ref.Val.ConvertToType. +func (t *TypeValue) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case TypeType: + return TypeType + case StringType: + return String(t.TypeName()) + } + return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (t *TypeValue) Equal(other ref.Val) ref.Val { + otherType, ok := other.(ref.Type) + return Bool(ok && t.TypeName() == otherType.TypeName()) +} + +// HasTrait indicates whether the type supports the given trait. +// Trait codes are defined in the traits package, e.g. see traits.AdderType. +func (t *TypeValue) HasTrait(trait int) bool { + return trait&t.traitMask == trait +} + +// String implements fmt.Stringer. +func (t *TypeValue) String() string { + return t.name +} + +// Type implements ref.Val.Type. +func (t *TypeValue) Type() ref.Type { + return TypeType +} + +// TypeName gives the type's name as a string. +func (t *TypeValue) TypeName() string { + return t.name +} + +// Value implements ref.Val.Value. +func (t *TypeValue) Value() any { + return t.name +} diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go deleted file mode 100644 index 76624eefdee..00000000000 --- a/vendor/github.com/google/cel-go/common/types/types.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "reflect" - "strings" - - chkdecls "github.com/google/cel-go/checker/decls" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" - - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" -) - -// Kind indicates a CEL type's kind which is used to differentiate quickly between simple -// and complex types. -type Kind uint - -const ( - // UnspecifiedKind is returned when the type is nil or its kind is not specified. - UnspecifiedKind Kind = iota - - // DynKind represents a dynamic type. This kind only exists at type-check time. - DynKind - - // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. 
- // Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf - // well-known types. - AnyKind - - // BoolKind represents a boolean type. - BoolKind - - // BytesKind represents a bytes type. - BytesKind - - // DoubleKind represents a double type. - DoubleKind - - // DurationKind represents a CEL duration type. - DurationKind - - // ErrorKind represents a CEL error type. - ErrorKind - - // IntKind represents an integer type. - IntKind - - // ListKind represents a list type. - ListKind - - // MapKind represents a map type. - MapKind - - // NullTypeKind represents a null type. - NullTypeKind - - // OpaqueKind represents an abstract type which has no accessible fields. - OpaqueKind - - // StringKind represents a string type. - StringKind - - // StructKind represents a structured object with typed fields. - StructKind - - // TimestampKind represents a a CEL time type. - TimestampKind - - // TypeKind represents the CEL type. - TypeKind - - // TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible. - TypeParamKind - - // UintKind represents a uint type. - UintKind - - // UnknownKind represents an unknown value type. - UnknownKind -) - -var ( - // AnyType represents the google.protobuf.Any type. - AnyType = &Type{ - kind: AnyKind, - runtimeTypeName: "google.protobuf.Any", - traitMask: traits.FieldTesterType | - traits.IndexerType, - } - // BoolType represents the bool type. - BoolType = &Type{ - kind: BoolKind, - runtimeTypeName: "bool", - traitMask: traits.ComparerType | - traits.NegatorType, - } - // BytesType represents the bytes type. - BytesType = &Type{ - kind: BytesKind, - runtimeTypeName: "bytes", - traitMask: traits.AdderType | - traits.ComparerType | - traits.SizerType, - } - // DoubleType represents the double type. - DoubleType = &Type{ - kind: DoubleKind, - runtimeTypeName: "double", - traitMask: traits.AdderType | - traits.ComparerType | - traits.DividerType | - traits.MultiplierType | - traits.NegatorType | - traits.SubtractorType, - } - // DurationType represents the CEL duration type. - DurationType = &Type{ - kind: DurationKind, - runtimeTypeName: "google.protobuf.Duration", - traitMask: traits.AdderType | - traits.ComparerType | - traits.NegatorType | - traits.ReceiverType | - traits.SubtractorType, - } - // DynType represents a dynamic CEL type whose type will be determined at runtime from context. - DynType = &Type{ - kind: DynKind, - runtimeTypeName: "dyn", - } - // ErrorType represents a CEL error value. - ErrorType = &Type{ - kind: ErrorKind, - runtimeTypeName: "error", - } - // IntType represents the int type. - IntType = &Type{ - kind: IntKind, - runtimeTypeName: "int", - traitMask: traits.AdderType | - traits.ComparerType | - traits.DividerType | - traits.ModderType | - traits.MultiplierType | - traits.NegatorType | - traits.SubtractorType, - } - // ListType represents the runtime list type. - ListType = NewListType(nil) - // MapType represents the runtime map type. - MapType = NewMapType(nil, nil) - // NullType represents the type of a null value. - NullType = &Type{ - kind: NullTypeKind, - runtimeTypeName: "null_type", - } - // StringType represents the string type. - StringType = &Type{ - kind: StringKind, - runtimeTypeName: "string", - traitMask: traits.AdderType | - traits.ComparerType | - traits.MatcherType | - traits.ReceiverType | - traits.SizerType, - } - // TimestampType represents the time type. 
- TimestampType = &Type{ - kind: TimestampKind, - runtimeTypeName: "google.protobuf.Timestamp", - traitMask: traits.AdderType | - traits.ComparerType | - traits.ReceiverType | - traits.SubtractorType, - } - // TypeType represents a CEL type - TypeType = &Type{ - kind: TypeKind, - runtimeTypeName: "type", - } - // UintType represents a uint type. - UintType = &Type{ - kind: UintKind, - runtimeTypeName: "uint", - traitMask: traits.AdderType | - traits.ComparerType | - traits.DividerType | - traits.ModderType | - traits.MultiplierType | - traits.SubtractorType, - } - // UnknownType represents an unknown value type. - UnknownType = &Type{ - kind: UnknownKind, - runtimeTypeName: "unknown", - } -) - -var _ ref.Type = &Type{} -var _ ref.Val = &Type{} - -// Type holds a reference to a runtime type with an optional type-checked set of type parameters. -type Type struct { - // kind indicates general category of the type. - kind Kind - - // parameters holds the optional type-checked set of type Parameters that are used during static analysis. - parameters []*Type - - // runtimeTypeName indicates the runtime type name of the type. - runtimeTypeName string - - // isAssignableType function determines whether one type is assignable to this type. - // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. - isAssignableType func(other *Type) bool - - // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. - // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. - isAssignableRuntimeType func(other ref.Val) bool - - // traitMask is a mask of flags which indicate the capabilities of the type. - traitMask int -} - -// ConvertToNative implements ref.Val.ConvertToNative. -func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) { - return nil, fmt.Errorf("type conversion not supported for 'type'") -} - -// ConvertToType implements ref.Val.ConvertToType. -func (t *Type) ConvertToType(typeVal ref.Type) ref.Val { - switch typeVal { - case TypeType: - return TypeType - case StringType: - return String(t.TypeName()) - } - return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) -} - -// Equal indicates whether two types have the same runtime type name. -// -// The name Equal is a bit of a misnomer, but for historical reasons, this is the -// runtime behavior. For a more accurate definition see IsType(). -func (t *Type) Equal(other ref.Val) ref.Val { - otherType, ok := other.(ref.Type) - return Bool(ok && t.TypeName() == otherType.TypeName()) -} - -// HasTrait implements the ref.Type interface method. -func (t *Type) HasTrait(trait int) bool { - return trait&t.traitMask == trait -} - -// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names. -func (t *Type) IsExactType(other *Type) bool { - return t.isTypeInternal(other, true) -} - -// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names. -func (t *Type) IsEquivalentType(other *Type) bool { - return t.isTypeInternal(other, false) -} - -// Kind indicates general category of the type. -func (t *Type) Kind() Kind { - if t == nil { - return UnspecifiedKind - } - return t.kind -} - -// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag. 
-func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool { - if t == nil { - return false - } - if t == other { - return true - } - if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) { - return false - } - if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() { - return false - } - for i, p := range t.Parameters() { - if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) { - return false - } - } - return true -} - -// IsAssignableType determines whether the current type is type-check assignable from the input fromType. -func (t *Type) IsAssignableType(fromType *Type) bool { - if t == nil { - return false - } - if t.isAssignableType != nil { - return t.isAssignableType(fromType) - } - return t.defaultIsAssignableType(fromType) -} - -// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. -// -// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) -// will have a runtime assignable type of a map. -func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { - if t == nil { - return false - } - if t.isAssignableRuntimeType != nil { - return t.isAssignableRuntimeType(val) - } - return t.defaultIsAssignableRuntimeType(val) -} - -// Parameters returns the list of type parameters if set. -// -// For ListKind, Parameters()[0] represents the list element type -// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map -// value type. -func (t *Type) Parameters() []*Type { - if t == nil { - return emptyParams - } - return t.parameters -} - -// DeclaredTypeName indicates the fully qualified and parameterized type-check type name. -func (t *Type) DeclaredTypeName() string { - // if the type itself is neither null, nor dyn, but is assignable to null, then it's a wrapper type. - if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) { - return fmt.Sprintf("wrapper(%s)", t.TypeName()) - } - return t.TypeName() -} - -// Type implements the ref.Val interface method. -func (t *Type) Type() ref.Type { - return TypeType -} - -// Value implements the ref.Val interface method. -func (t *Type) Value() any { - return t.TypeName() -} - -// TypeName returns the type-erased fully qualified runtime type name. -// -// TypeName implements the ref.Type interface method. -func (t *Type) TypeName() string { - if t == nil { - return "" - } - return t.runtimeTypeName -} - -// String returns a human-readable definition of the type name. -func (t *Type) String() string { - if len(t.Parameters()) == 0 { - return t.DeclaredTypeName() - } - params := make([]string, len(t.Parameters())) - for i, p := range t.Parameters() { - params[i] = p.String() - } - return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", ")) -} - -// isDyn indicates whether the type is dynamic in any way. -func (t *Type) isDyn() bool { - k := t.Kind() - return k == DynKind || k == AnyKind || k == TypeParamKind -} - -// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another -// where any of the following may return a true result: -// - The from types are the same instance -// - The target type is dynamic -// - The fromType has the same kind and type name as the target type, and all parameters of the target type -// -// are IsAssignableType() from the parameters of the fromType. 
-func (t *Type) defaultIsAssignableType(fromType *Type) bool { - if t == fromType || t.isDyn() { - return true - } - if t.Kind() != fromType.Kind() || - t.TypeName() != fromType.TypeName() || - len(t.Parameters()) != len(fromType.Parameters()) { - return false - } - for i, tp := range t.Parameters() { - fp := fromType.Parameters()[i] - if !tp.IsAssignableType(fp) { - return false - } - } - return true -} - -// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types -// to determine whether a ref.Val is assignable to the declared type for a function signature. -func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool { - valType := val.Type() - // If the current type and value type don't agree, then return - if !(t.isDyn() || t.TypeName() == valType.TypeName()) { - return false - } - switch t.Kind() { - case ListKind: - elemType := t.Parameters()[0] - l := val.(traits.Lister) - if l.Size() == IntZero { - return true - } - it := l.Iterator() - elemVal := it.Next() - return elemType.IsAssignableRuntimeType(elemVal) - case MapKind: - keyType := t.Parameters()[0] - elemType := t.Parameters()[1] - m := val.(traits.Mapper) - if m.Size() == IntZero { - return true - } - it := m.Iterator() - keyVal := it.Next() - elemVal := m.Get(keyVal) - return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal) - } - return true -} - -// NewListType creates an instances of a list type value with the provided element type. -func NewListType(elemType *Type) *Type { - return &Type{ - kind: ListKind, - parameters: []*Type{elemType}, - runtimeTypeName: "list", - traitMask: traits.AdderType | - traits.ContainerType | - traits.IndexerType | - traits.IterableType | - traits.SizerType, - } -} - -// NewMapType creates an instance of a map type value with the provided key and value types. -func NewMapType(keyType, valueType *Type) *Type { - return &Type{ - kind: MapKind, - parameters: []*Type{keyType, valueType}, - runtimeTypeName: "map", - traitMask: traits.ContainerType | - traits.IndexerType | - traits.IterableType | - traits.SizerType, - } -} - -// NewNullableType creates an instance of a nullable type with the provided wrapped type. -// -// Note: only primitive types are supported as wrapped types. -func NewNullableType(wrapped *Type) *Type { - return &Type{ - kind: wrapped.Kind(), - parameters: wrapped.Parameters(), - runtimeTypeName: wrapped.TypeName(), - traitMask: wrapped.traitMask, - isAssignableType: func(other *Type) bool { - return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other) - }, - isAssignableRuntimeType: func(other ref.Val) bool { - return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other) - }, - } -} - -// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional. -func NewOptionalType(param *Type) *Type { - return NewOpaqueType("optional", param) -} - -// NewOpaqueType creates an abstract parameterized type with a given name. -func NewOpaqueType(name string, params ...*Type) *Type { - return &Type{ - kind: OpaqueKind, - parameters: params, - runtimeTypeName: name, - } -} - -// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type. -// -// An object type is assumed to support field presence testing and field indexing. Additionally, the -// type may also indicate additional traits through the use of the optional traits vararg argument. 
-func NewObjectType(typeName string, traits ...int) *Type { - // Function sanitizes object types on the fly - if wkt, found := checkedWellKnowns[typeName]; found { - return wkt - } - traitMask := 0 - for _, trait := range traits { - traitMask |= trait - } - return &Type{ - kind: StructKind, - parameters: emptyParams, - runtimeTypeName: typeName, - traitMask: structTypeTraitMask | traitMask, - } -} - -// NewObjectTypeValue creates a type reference to an externally defined type. -// -// Deprecated: use cel.ObjectType(typeName) -func NewObjectTypeValue(typeName string) *Type { - return NewObjectType(typeName) -} - -// NewTypeValue creates an opaque type which has a set of optional type traits as defined in -// the common/types/traits package. -// -// Deprecated: use cel.ObjectType(typeName, traits) -func NewTypeValue(typeName string, traits ...int) *Type { - traitMask := 0 - for _, trait := range traits { - traitMask |= trait - } - return &Type{ - kind: StructKind, - parameters: emptyParams, - runtimeTypeName: typeName, - traitMask: traitMask, - } -} - -// NewTypeParamType creates a parameterized type instance. -func NewTypeParamType(paramName string) *Type { - return &Type{ - kind: TypeParamKind, - runtimeTypeName: paramName, - } -} - -// NewTypeTypeWithParam creates a type with a type parameter. -// Used for type-checking purposes, but equivalent to TypeType otherwise. -func NewTypeTypeWithParam(param *Type) *Type { - return &Type{ - kind: TypeKind, - runtimeTypeName: "type", - parameters: []*Type{param}, - } -} - -// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. -func TypeToExprType(t *Type) (*exprpb.Type, error) { - switch t.Kind() { - case AnyKind: - return chkdecls.Any, nil - case BoolKind: - return maybeWrapper(t, chkdecls.Bool), nil - case BytesKind: - return maybeWrapper(t, chkdecls.Bytes), nil - case DoubleKind: - return maybeWrapper(t, chkdecls.Double), nil - case DurationKind: - return chkdecls.Duration, nil - case DynKind: - return chkdecls.Dyn, nil - case ErrorKind: - return chkdecls.Error, nil - case IntKind: - return maybeWrapper(t, chkdecls.Int), nil - case ListKind: - if len(t.Parameters()) != 1 { - return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters())) - } - et, err := TypeToExprType(t.Parameters()[0]) - if err != nil { - return nil, err - } - return chkdecls.NewListType(et), nil - case MapKind: - if len(t.Parameters()) != 2 { - return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters())) - } - kt, err := TypeToExprType(t.Parameters()[0]) - if err != nil { - return nil, err - } - vt, err := TypeToExprType(t.Parameters()[1]) - if err != nil { - return nil, err - } - return chkdecls.NewMapType(kt, vt), nil - case NullTypeKind: - return chkdecls.Null, nil - case OpaqueKind: - params := make([]*exprpb.Type, len(t.Parameters())) - for i, p := range t.Parameters() { - pt, err := TypeToExprType(p) - if err != nil { - return nil, err - } - params[i] = pt - } - return chkdecls.NewAbstractType(t.TypeName(), params...), nil - case StringKind: - return maybeWrapper(t, chkdecls.String), nil - case StructKind: - return chkdecls.NewObjectType(t.TypeName()), nil - case TimestampKind: - return chkdecls.Timestamp, nil - case TypeParamKind: - return chkdecls.NewTypeParamType(t.TypeName()), nil - case TypeKind: - if len(t.Parameters()) == 1 { - p, err := TypeToExprType(t.Parameters()[0]) - if err != nil { - return nil, err - } - return chkdecls.NewTypeType(p), nil - } - return 
chkdecls.NewTypeType(nil), nil - case UintKind: - return maybeWrapper(t, chkdecls.Uint), nil - } - return nil, fmt.Errorf("missing type conversion to proto: %v", t) -} - -// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. -func ExprTypeToType(t *exprpb.Type) (*Type, error) { - switch t.GetTypeKind().(type) { - case *exprpb.Type_Dyn: - return DynType, nil - case *exprpb.Type_AbstractType_: - paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) - for i, p := range t.GetAbstractType().GetParameterTypes() { - pt, err := ExprTypeToType(p) - if err != nil { - return nil, err - } - paramTypes[i] = pt - } - return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil - case *exprpb.Type_ListType_: - et, err := ExprTypeToType(t.GetListType().GetElemType()) - if err != nil { - return nil, err - } - return NewListType(et), nil - case *exprpb.Type_MapType_: - kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) - if err != nil { - return nil, err - } - vt, err := ExprTypeToType(t.GetMapType().GetValueType()) - if err != nil { - return nil, err - } - return NewMapType(kt, vt), nil - case *exprpb.Type_MessageType: - return NewObjectType(t.GetMessageType()), nil - case *exprpb.Type_Null: - return NullType, nil - case *exprpb.Type_Primitive: - switch t.GetPrimitive() { - case exprpb.Type_BOOL: - return BoolType, nil - case exprpb.Type_BYTES: - return BytesType, nil - case exprpb.Type_DOUBLE: - return DoubleType, nil - case exprpb.Type_INT64: - return IntType, nil - case exprpb.Type_STRING: - return StringType, nil - case exprpb.Type_UINT64: - return UintType, nil - default: - return nil, fmt.Errorf("unsupported primitive type: %v", t) - } - case *exprpb.Type_TypeParam: - return NewTypeParamType(t.GetTypeParam()), nil - case *exprpb.Type_Type: - if t.GetType().GetTypeKind() != nil { - p, err := ExprTypeToType(t.GetType()) - if err != nil { - return nil, err - } - return NewTypeTypeWithParam(p), nil - } - return TypeType, nil - case *exprpb.Type_WellKnown: - switch t.GetWellKnown() { - case exprpb.Type_ANY: - return AnyType, nil - case exprpb.Type_DURATION: - return DurationType, nil - case exprpb.Type_TIMESTAMP: - return TimestampType, nil - default: - return nil, fmt.Errorf("unsupported well-known type: %v", t) - } - case *exprpb.Type_Wrapper: - t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) - if err != nil { - return nil, err - } - return NewNullableType(t), nil - case *exprpb.Type_Error: - return ErrorType, nil - default: - return nil, fmt.Errorf("unsupported type: %v", t) - } -} - -func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { - if t.IsAssignableType(NullType) { - return chkdecls.NewWrapperType(pbType) - } - return pbType -} - -func maybeForeignType(t ref.Type) *Type { - if celType, ok := t.(*Type); ok { - return celType - } - // Inspect the incoming type to determine its traits. The assumption will be that the incoming - // type does not have any field values; however, if the trait mask indicates that field testing - // and indexing are supported, the foreign type is marked as a struct. - traitMask := 0 - for _, trait := range allTraits { - if t.HasTrait(trait) { - traitMask |= trait - } - } - // Treat the value like a struct. If it has no fields, this is harmless to denote the type - // as such since it basically becomes an opaque type by convention. 
- return NewObjectType(t.TypeName(), traitMask) -} - -var ( - checkedWellKnowns = map[string]*Type{ - // Wrapper types. - "google.protobuf.BoolValue": NewNullableType(BoolType), - "google.protobuf.BytesValue": NewNullableType(BytesType), - "google.protobuf.DoubleValue": NewNullableType(DoubleType), - "google.protobuf.FloatValue": NewNullableType(DoubleType), - "google.protobuf.Int64Value": NewNullableType(IntType), - "google.protobuf.Int32Value": NewNullableType(IntType), - "google.protobuf.UInt64Value": NewNullableType(UintType), - "google.protobuf.UInt32Value": NewNullableType(UintType), - "google.protobuf.StringValue": NewNullableType(StringType), - // Well-known types. - "google.protobuf.Any": AnyType, - "google.protobuf.Duration": DurationType, - "google.protobuf.Timestamp": TimestampType, - // Json types. - "google.protobuf.ListValue": NewListType(DynType), - "google.protobuf.NullValue": NullType, - "google.protobuf.Struct": NewMapType(StringType, DynType), - "google.protobuf.Value": DynType, - } - - emptyParams = []*Type{} - - allTraits = []int{ - traits.AdderType, - traits.ComparerType, - traits.ContainerType, - traits.DividerType, - traits.FieldTesterType, - traits.IndexerType, - traits.IterableType, - traits.IteratorType, - traits.MatcherType, - traits.ModderType, - traits.MultiplierType, - traits.NegatorType, - traits.ReceiverType, - traits.SizerType, - traits.SubtractorType, - } - - structTypeTraitMask = traits.FieldTesterType | traits.IndexerType -) diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go index 3257f9ade97..615c7ec5230 100644 --- a/vendor/github.com/google/cel-go/common/types/uint.go +++ b/vendor/github.com/google/cel-go/common/types/uint.go @@ -21,6 +21,7 @@ import ( "strconv" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" anypb "google.golang.org/protobuf/types/known/anypb" structpb "google.golang.org/protobuf/types/known/structpb" @@ -31,6 +32,15 @@ import ( type Uint uint64 var ( + // UintType singleton. + UintType = NewTypeValue("uint", + traits.AdderType, + traits.ComparerType, + traits.DividerType, + traits.ModderType, + traits.MultiplierType, + traits.SubtractorType) + uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go index 9dd2b257947..bc411c15b92 100644 --- a/vendor/github.com/google/cel-go/common/types/unknown.go +++ b/vendor/github.com/google/cel-go/common/types/unknown.go @@ -15,312 +15,52 @@ package types import ( - "fmt" - "math" "reflect" - "sort" - "strings" - "unicode" "github.com/google/cel-go/common/types/ref" ) +// Unknown type implementation which collects expression ids which caused the +// current value to become unknown. +type Unknown []int64 + var ( - unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}} + // UnknownType singleton. + UnknownType = NewTypeValue("unknown") ) -// NewAttributeTrail creates a new simple attribute from a variable name. -func NewAttributeTrail(variable string) *AttributeTrail { - if variable == "" { - return unspecifiedAttribute - } - return &AttributeTrail{variable: variable} -} - -// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to -// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable. 
-// -// The qualifer path elements adhere to the AttributeQualifier type constraint. -type AttributeTrail struct { - variable string - qualifierPath []any -} - -// Equal returns whether two attribute values have the same variable name and qualifier paths. -func (a *AttributeTrail) Equal(other *AttributeTrail) bool { - if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) { - return false - } - for i, q := range a.QualifierPath() { - qual := other.QualifierPath()[i] - if !qualifiersEqual(q, qual) { - return false - } - } - return true -} - -func qualifiersEqual(a, b any) bool { - if a == b { - return true - } - switch numA := a.(type) { - case int64: - numB, ok := b.(uint64) - if !ok { - return false - } - return intUintEqual(numA, numB) - case uint64: - numB, ok := b.(int64) - if !ok { - return false - } - return intUintEqual(numB, numA) - default: - return false - } -} - -func intUintEqual(i int64, u uint64) bool { - if i < 0 || u > math.MaxInt64 { - return false - } - return i == int64(u) -} - -// Variable returns the variable name associated with the attribute. -func (a *AttributeTrail) Variable() string { - return a.variable -} - -// QualifierPath returns the optional set of qualifying fields or indices applied to the variable. -func (a *AttributeTrail) QualifierPath() []any { - return a.qualifierPath -} - -// String returns the string representation of the Attribute. -func (a *AttributeTrail) String() string { - if a.variable == "" { - return "" - } - var str strings.Builder - str.WriteString(a.variable) - for _, q := range a.qualifierPath { - switch q := q.(type) { - case bool, int64: - str.WriteString(fmt.Sprintf("[%v]", q)) - case uint64: - str.WriteString(fmt.Sprintf("[%vu]", q)) - case string: - if isIdentifierCharacter(q) { - str.WriteString(fmt.Sprintf(".%v", q)) - } else { - str.WriteString(fmt.Sprintf("[%q]", q)) - } - } - } - return str.String() -} - -func isIdentifierCharacter(str string) bool { - for _, c := range str { - if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" { - continue - } - return false - } - return true -} - -// AttributeQualifier constrains the possible types which may be used to qualify an attribute. -type AttributeQualifier interface { - bool | int64 | uint64 | string -} - -// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type. -func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail { - attr.qualifierPath = append(attr.qualifierPath, qualifier) - return attr -} - -// Unknown type which collects expression ids which caused the current value to become unknown. -type Unknown struct { - attributeTrails map[int64][]*AttributeTrail -} - -// NewUnknown creates a new unknown at a given expression id for an attribute. -// -// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`. -func NewUnknown(id int64, attr *AttributeTrail) *Unknown { - if attr == nil { - attr = unspecifiedAttribute - } - return &Unknown{ - attributeTrails: map[int64][]*AttributeTrail{id: {attr}}, - } -} - -// IDs returns the set of unknown expression ids contained by this value. -// -// Numeric identifiers are guaranteed to be in sorted order. -func (u *Unknown) IDs() []int64 { - ids := make(int64Slice, len(u.attributeTrails)) - i := 0 - for id := range u.attributeTrails { - ids[i] = id - i++ - } - ids.Sort() - return ids -} - -// GetAttributeTrails returns the attribute trails, if present, missing for a given expression id. 
-func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) { - trails, found := u.attributeTrails[id] - return trails, found -} - -// Contains returns true if the input unknown is a subset of the current unknown. -func (u *Unknown) Contains(other *Unknown) bool { - for id, otherTrails := range other.attributeTrails { - trails, found := u.attributeTrails[id] - if !found || len(otherTrails) != len(trails) { - return false - } - for _, ot := range otherTrails { - found := false - for _, t := range trails { - if t.Equal(ot) { - found = true - break - } - } - if !found { - return false - } - } - } - return true -} - // ConvertToNative implements ref.Val.ConvertToNative. -func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { +func (u Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { return u.Value(), nil } // ConvertToType is an identity function since unknown values cannot be modified. -func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val { +func (u Unknown) ConvertToType(typeVal ref.Type) ref.Val { return u } // Equal is an identity function since unknown values cannot be modified. -func (u *Unknown) Equal(other ref.Val) ref.Val { +func (u Unknown) Equal(other ref.Val) ref.Val { return u } -// String implements the Stringer interface -func (u *Unknown) String() string { - var str strings.Builder - for id, attrs := range u.attributeTrails { - if str.Len() != 0 { - str.WriteString(", ") - } - if len(attrs) == 1 { - str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id)) - } else { - str.WriteString(fmt.Sprintf("%v (%d)", attrs, id)) - } - } - return str.String() -} - // Type implements ref.Val.Type. -func (u *Unknown) Type() ref.Type { +func (u Unknown) Type() ref.Type { return UnknownType } // Value implements ref.Val.Value. -func (u *Unknown) Value() any { - return u +func (u Unknown) Value() any { + return []int64(u) } -// IsUnknown returns whether the element ref.Val is in instance of *types.Unknown +// IsUnknown returns whether the element ref.Type or ref.Val is equal to the +// UnknownType singleton. func IsUnknown(val ref.Val) bool { switch val.(type) { - case *Unknown: + case Unknown: return true default: return false } } - -// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce -// an unknown result. -// -// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input -// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil. -// If both values are non-nil and unknown, then the return value will be a merge of both unknowns. -func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) { - src, isUnk := val.(*Unknown) - if !isUnk { - if unk != nil { - return unk, true - } - return unk, false - } - return MergeUnknowns(src, unk), true -} - -// MergeUnknowns combines two unknown values into a new unknown value. 
-func MergeUnknowns(unk1, unk2 *Unknown) *Unknown { - if unk1 == nil { - return unk2 - } - if unk2 == nil { - return unk1 - } - out := &Unknown{ - attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)), - } - for id, ats := range unk1.attributeTrails { - out.attributeTrails[id] = ats - } - for id, ats := range unk2.attributeTrails { - existing, found := out.attributeTrails[id] - if !found { - out.attributeTrails[id] = ats - continue - } - - for _, at := range ats { - found := false - for _, et := range existing { - if at.Equal(et) { - found = true - break - } - } - if !found { - existing = append(existing, at) - } - } - out.attributeTrails[id] = existing - } - return out -} - -// int64Slice is an implementation of the sort.Interface -type int64Slice []int64 - -// Len returns the number of elements in the slice. -func (x int64Slice) Len() int { return len(x) } - -// Less indicates whether the value at index i is less than the value at index j. -func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] } - -// Swap swaps the values at indices i and j in place. -func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -// Sort is a convenience method: x.Sort() calls Sort(x). -func (x int64Slice) Sort() { sort.Sort(x) } diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go index 71662eee31b..a8e9afa9e70 100644 --- a/vendor/github.com/google/cel-go/common/types/util.go +++ b/vendor/github.com/google/cel-go/common/types/util.go @@ -21,7 +21,7 @@ import ( // IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType. func IsUnknownOrError(val ref.Val) bool { switch val.(type) { - case *Unknown, *Err: + case Unknown, *Err: return true } return false diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel index 6fdcc60c658..4bcf8a283ea 100644 --- a/vendor/github.com/google/cel-go/ext/BUILD.bazel +++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel @@ -9,7 +9,6 @@ go_library( srcs = [ "encoders.go", "guards.go", - "lists.go", "math.go", "native.go", "protos.go", @@ -20,8 +19,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//cel:go_default_library", - "//checker:go_default_library", "//checker/decls:go_default_library", + "//common:go_default_library", "//common/overloads:go_default_library", "//common/types:go_default_library", "//common/types/pb:go_default_library", @@ -42,7 +41,6 @@ go_test( size = "small", srcs = [ "encoders_test.go", - "lists_test.go", "math_test.go", "native_test.go", "protos_test.go", @@ -55,6 +53,7 @@ go_test( deps = [ "//cel:go_default_library", "//checker:go_default_library", + "//common:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md index 6f621ac4af6..ef0eb2ab7f5 100644 --- a/vendor/github.com/google/cel-go/ext/README.md +++ b/vendor/github.com/google/cel-go/ext/README.md @@ -149,23 +149,6 @@ Example: proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false -## Lists - -Extended functions for list manipulation. As a general note, all indices are -zero-based. - -### Slice - - -Returns a new sub-list using the indexes provided. 
- - .slice(, ) -> - -Examples: - - [1,2,3,4].slice(1, 3) // return [2, 3] - [1,2,3,4].slice(2, 4) // return [3 ,4] - ## Sets Sets provides set relationship tests. diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go index 4ac9a7f07fc..9cc3c3efe58 100644 --- a/vendor/github.com/google/cel-go/ext/bindings.go +++ b/vendor/github.com/google/cel-go/ext/bindings.go @@ -16,6 +16,7 @@ package ext import ( "github.com/google/cel-go/cel" + "github.com/google/cel-go/common" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -70,7 +71,7 @@ func (celBindings) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { if !macroTargetMatchesNamespace(celNamespace, target) { return nil, nil } @@ -80,7 +81,10 @@ func celBind(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) case *exprpb.Expr_IdentExpr: varName = varIdent.GetIdentExpr().GetName() default: - return nil, meh.NewError(varIdent.GetId(), "cel.bind() variable names must be simple identifiers") + return nil, &common.Error{ + Message: "cel.bind() variable names must be simple identifers", + Location: meh.OffsetLocation(varIdent.GetId()), + } } varInit := args[1] resultExpr := args[2] diff --git a/vendor/github.com/google/cel-go/ext/encoders.go b/vendor/github.com/google/cel-go/ext/encoders.go index 61ac0b77750..d9f9cb5152d 100644 --- a/vendor/github.com/google/cel-go/ext/encoders.go +++ b/vendor/github.com/google/cel-go/ext/encoders.go @@ -16,6 +16,7 @@ package ext import ( "encoding/base64" + "reflect" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" @@ -85,3 +86,7 @@ func base64DecodeString(str string) ([]byte, error) { func base64EncodeBytes(bytes []byte) (string, error) { return base64.StdEncoding.EncodeToString(bytes), nil } + +var ( + bytesListType = reflect.TypeOf([]byte{}) +) diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go index 785c8675bbf..4c7786a690b 100644 --- a/vendor/github.com/google/cel-go/ext/guards.go +++ b/vendor/github.com/google/cel-go/ext/guards.go @@ -17,7 +17,6 @@ package ext import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) diff --git a/vendor/github.com/google/cel-go/ext/lists.go b/vendor/github.com/google/cel-go/ext/lists.go deleted file mode 100644 index 08751d08a1e..00000000000 --- a/vendor/github.com/google/cel-go/ext/lists.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ext - -import ( - "fmt" - - "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types" - "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" -) - -// Lists returns a cel.EnvOption to configure extended functions for list manipulation. -// As a general note, all indices are zero-based. -// # Slice -// -// Returns a new sub-list using the indexes provided. -// -// .slice(, ) -> -// -// Examples: -// -// [1,2,3,4].slice(1, 3) // return [2, 3] -// [1,2,3,4].slice(2, 4) // return [3 ,4] -func Lists() cel.EnvOption { - return cel.Lib(listsLib{}) -} - -type listsLib struct{} - -// LibraryName implements the SingletonLibrary interface method. -func (listsLib) LibraryName() string { - return "cel.lib.ext.lists" -} - -// CompileOptions implements the Library interface method. -func (listsLib) CompileOptions() []cel.EnvOption { - listType := cel.ListType(cel.TypeParamType("T")) - return []cel.EnvOption{ - cel.Function("slice", - cel.MemberOverload("list_slice", - []*cel.Type{listType, cel.IntType, cel.IntType}, listType, - cel.FunctionBinding(func(args ...ref.Val) ref.Val { - list := args[0].(traits.Lister) - start := args[1].(types.Int) - end := args[2].(types.Int) - result, err := slice(list, start, end) - if err != nil { - return types.WrapErr(err) - } - return result - }), - ), - ), - } -} - -// ProgramOptions implements the Library interface method. -func (listsLib) ProgramOptions() []cel.ProgramOption { - return []cel.ProgramOption{} -} - -func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { - listLength := list.Size().(types.Int) - if start < 0 || end < 0 { - return nil, fmt.Errorf("cannot slice(%d, %d), negative indexes not supported", start, end) - } - if start > end { - return nil, fmt.Errorf("cannot slice(%d, %d), start index must be less than or equal to end index", start, end) - } - if listLength < end { - return nil, fmt.Errorf("cannot slice(%d, %d), list is length %d", start, end, listLength) - } - - var newList []ref.Val - for i := types.Int(start); i < end; i++ { - val := list.Get(i) - newList = append(newList, val) - } - return types.DefaultTypeAdapter.NativeToValue(newList), nil -} diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go index 0b9a3610314..1c8ad585a17 100644 --- a/vendor/github.com/google/cel-go/ext/math.go +++ b/vendor/github.com/google/cel-go/ext/math.go @@ -19,10 +19,10 @@ import ( "strings" "github.com/google/cel-go/cel" + "github.com/google/cel-go/common" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -187,18 +187,24 @@ func (mathLib) ProgramOptions() []cel.ProgramOption { return []cel.ProgramOption{} } -func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.least() requires at least one argument") + return nil, &common.Error{ + Message: "math.least() requires at least one argument", + Location: meh.OffsetLocation(target.GetId()), + } case 1: if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { return meh.GlobalCall(minFunc, args[0]), nil } - 
return nil, meh.NewError(args[0].GetId(), "math.least() invalid single argument value") + return nil, &common.Error{ + Message: "math.least() invalid single argument value", + Location: meh.OffsetLocation(args[0].GetId()), + } case 2: err := checkInvalidArgs(meh, "math.least()", args) if err != nil { @@ -214,18 +220,24 @@ func mathLeast(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr } } -func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func mathGreatest(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { if !macroTargetMatchesNamespace(mathNamespace, target) { return nil, nil } switch len(args) { case 0: - return nil, meh.NewError(target.GetId(), "math.greatest() requires at least one argument") + return nil, &common.Error{ + Message: "math.greatest() requires at least one argument", + Location: meh.OffsetLocation(target.GetId()), + } case 1: if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) { return meh.GlobalCall(maxFunc, args[0]), nil } - return nil, meh.NewError(args[0].GetId(), "math.greatest() invalid single argument value") + return nil, &common.Error{ + Message: "math.greatest() invalid single argument value", + Location: meh.OffsetLocation(args[0].GetId()), + } case 2: err := checkInvalidArgs(meh, "math.greatest()", args) if err != nil { @@ -311,11 +323,14 @@ func maxList(numList ref.Val) ref.Val { } } -func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *cel.Error { +func checkInvalidArgs(meh cel.MacroExprHelper, funcName string, args []*exprpb.Expr) *common.Error { for _, arg := range args { err := checkInvalidArgLiteral(funcName, arg) if err != nil { - return meh.NewError(arg.GetId(), err.Error()) + return &common.Error{ + Message: err.Error(), + Location: meh.OffsetLocation(arg.GetId()), + } } } return nil diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go index 0b5fc38ca97..acbc44b6d51 100644 --- a/vendor/github.com/google/cel-go/ext/native.go +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -24,11 +24,13 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker/decls" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/pb" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" structpb "google.golang.org/protobuf/types/known/structpb" ) @@ -79,7 +81,7 @@ var ( // the time that it is invoked. func NativeTypes(refTypes ...any) cel.EnvOption { return func(env *cel.Env) (*cel.Env, error) { - tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...) + tp, err := newNativeTypeProvider(env.TypeAdapter(), env.TypeProvider(), refTypes...) 
if err != nil { return nil, err } @@ -91,7 +93,7 @@ func NativeTypes(refTypes ...any) cel.EnvOption { } } -func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { +func newNativeTypeProvider(adapter ref.TypeAdapter, provider ref.TypeProvider, refTypes ...any) (*nativeTypeProvider, error) { nativeTypes := make(map[string]*nativeType, len(refTypes)) for _, refType := range refTypes { switch rt := refType.(type) { @@ -120,18 +122,18 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy type nativeTypeProvider struct { nativeTypes map[string]*nativeType - baseAdapter types.Adapter - baseProvider types.Provider + baseAdapter ref.TypeAdapter + baseProvider ref.TypeProvider } -// EnumValue proxies to the types.Provider configured at the times the NativeTypes +// EnumValue proxies to the ref.TypeProvider configured at the times the NativeTypes // option was configured. func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val { return tp.baseProvider.EnumValue(enumName) } // FindIdent looks up natives type instances by qualified identifier, and if not found -// proxies to the composed types.Provider. +// proxies to the composed ref.TypeProvider. func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { if t, found := tp.nativeTypes[typeName]; found { return t, true @@ -139,35 +141,32 @@ func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { return tp.baseProvider.FindIdent(typeName) } -// FindStructType looks up the CEL type definition by qualified identifier, and if not found -// proxies to the composed types.Provider. -func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool) { +// FindType looks up CEL type-checker type definition by qualified identifier, and if not found +// proxies to the composed ref.TypeProvider. 
+func (tp *nativeTypeProvider) FindType(typeName string) (*exprpb.Type, bool) { if _, found := tp.nativeTypes[typeName]; found { - return types.NewTypeTypeWithParam(types.NewObjectType(typeName)), true + return decls.NewTypeType(decls.NewObjectType(typeName)), true } - if celType, found := tp.baseProvider.FindStructType(typeName); found { - return celType, true - } - return tp.baseProvider.FindStructType(typeName) + return tp.baseProvider.FindType(typeName) } -// FindStructFieldType looks up a native type's field definition, and if the type name is not a native -// type then proxies to the composed types.Provider -func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { +// FindFieldType looks up a native type's field definition, and if the type name is not a native +// type then proxies to the composed ref.TypeProvider +func (tp *nativeTypeProvider) FindFieldType(typeName, fieldName string) (*ref.FieldType, bool) { t, found := tp.nativeTypes[typeName] if !found { - return tp.baseProvider.FindStructFieldType(typeName, fieldName) + return tp.baseProvider.FindFieldType(typeName, fieldName) } refField, isDefined := t.hasField(fieldName) if !found || !isDefined { return nil, false } - celType, ok := convertToCelType(refField.Type) + exprType, ok := convertToExprType(refField.Type) if !ok { return nil, false } - return &types.FieldType{ - Type: celType, + return &ref.FieldType{ + Type: exprType, IsSet: func(obj any) bool { refVal := reflect.Indirect(reflect.ValueOf(obj)) refField := refVal.FieldByName(fieldName) @@ -244,74 +243,75 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { } } -func convertToCelType(refType reflect.Type) (*cel.Type, bool) { +// convertToExprType converts the Golang reflect.Type to a protobuf exprpb.Type. 
+func convertToExprType(refType reflect.Type) (*exprpb.Type, bool) { switch refType.Kind() { case reflect.Bool: - return cel.BoolType, true + return decls.Bool, true case reflect.Float32, reflect.Float64: - return cel.DoubleType, true + return decls.Double, true case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: if refType == durationType { - return cel.DurationType, true + return decls.Duration, true } - return cel.IntType, true + return decls.Int, true case reflect.String: - return cel.StringType, true + return decls.String, true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return cel.UintType, true + return decls.Uint, true case reflect.Array, reflect.Slice: refElem := refType.Elem() if refElem == reflect.TypeOf(byte(0)) { - return cel.BytesType, true + return decls.Bytes, true } - elemType, ok := convertToCelType(refElem) + elemType, ok := convertToExprType(refElem) if !ok { return nil, false } - return cel.ListType(elemType), true + return decls.NewListType(elemType), true case reflect.Map: - keyType, ok := convertToCelType(refType.Key()) + keyType, ok := convertToExprType(refType.Key()) if !ok { return nil, false } // Ensure the key type is a int, bool, uint, string - elemType, ok := convertToCelType(refType.Elem()) + elemType, ok := convertToExprType(refType.Elem()) if !ok { return nil, false } - return cel.MapType(keyType, elemType), true + return decls.NewMapType(keyType, elemType), true case reflect.Struct: if refType == timestampType { - return cel.TimestampType, true + return decls.Timestamp, true } - return cel.ObjectType( + return decls.NewObjectType( fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), ), true case reflect.Pointer: if refType.Implements(pbMsgInterfaceType) { pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage) - return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true + return decls.NewObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true } - return convertToCelType(refType.Elem()) + return convertToExprType(refType.Elem()) } return nil, false } -func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val { +func newNativeObject(adapter ref.TypeAdapter, val any, refValue reflect.Value) ref.Val { valType, err := newNativeType(refValue.Type()) if err != nil { return types.NewErr(err.Error()) } return &nativeObj{ - Adapter: adapter, - val: val, - valType: valType, - refValue: refValue, + TypeAdapter: adapter, + val: val, + valType: valType, + refValue: refValue, } } type nativeObj struct { - types.Adapter + ref.TypeAdapter val any valType *nativeType refValue reflect.Value @@ -520,11 +520,11 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { return f, true } -func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { +func adaptFieldValue(adapter ref.TypeAdapter, refField reflect.Value) ref.Val { return adapter.NativeToValue(getFieldValue(adapter, refField)) } -func getFieldValue(adapter types.Adapter, refField reflect.Value) any { +func getFieldValue(adapter ref.TypeAdapter, refField reflect.Value) any { if refField.IsZero() { switch refField.Kind() { case reflect.Array, reflect.Slice: diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go index a7ca27a6a27..b905e710c14 100644 --- a/vendor/github.com/google/cel-go/ext/protos.go +++ b/vendor/github.com/google/cel-go/ext/protos.go @@ -16,6 
+16,7 @@ package ext import ( "github.com/google/cel-go/cel" + "github.com/google/cel-go/common" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -85,7 +86,7 @@ func (protoLib) ProgramOptions() []cel.ProgramOption { } // hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. -func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } @@ -97,7 +98,7 @@ func hasProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex } // getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message. -func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *cel.Error) { +func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { if !macroTargetMatchesNamespace(protoNamespace, target) { return nil, nil } @@ -108,7 +109,7 @@ func getProtoExt(meh cel.MacroExprHelper, target *exprpb.Expr, args []*exprpb.Ex return meh.Select(args[0], extFieldName), nil } -func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.Error) { +func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *common.Error) { isValid := false extensionField := "" switch expr.GetExprKind().(type) { @@ -116,7 +117,10 @@ func getExtFieldName(meh cel.MacroExprHelper, expr *exprpb.Expr) (string, *cel.E extensionField, isValid = validateIdentifier(expr) } if !isValid { - return "", meh.NewError(expr.GetId(), "invalid extension field") + return "", &common.Error{ + Message: "invalid extension field", + Location: meh.OffsetLocation(expr.GetId()), + } } return extensionField, nil } diff --git a/vendor/github.com/google/cel-go/ext/sets.go b/vendor/github.com/google/cel-go/ext/sets.go index 833c15f616f..4820d6199e6 100644 --- a/vendor/github.com/google/cel-go/ext/sets.go +++ b/vendor/github.com/google/cel-go/ext/sets.go @@ -15,14 +15,10 @@ package ext import ( - "math" - "github.com/google/cel-go/cel" - "github.com/google/cel-go/checker" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" - "github.com/google/cel-go/interpreter" ) // Sets returns a cel.EnvOption to configure namespaced set relationship @@ -99,24 +95,12 @@ func (setsLib) CompileOptions() []cel.EnvOption { cel.Function("sets.intersects", cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, cel.BinaryBinding(setsIntersects))), - cel.CostEstimatorOptions( - checker.OverloadCostEstimate("list_sets_contains_list", estimateSetsCost(1)), - checker.OverloadCostEstimate("list_sets_intersects_list", estimateSetsCost(1)), - // equivalence requires potentially two m*n comparisons to ensure each list is contained by the other - checker.OverloadCostEstimate("list_sets_equivalent_list", estimateSetsCost(2)), - ), } } // ProgramOptions implements the Library interface method. 
func (setsLib) ProgramOptions() []cel.ProgramOption { - return []cel.ProgramOption{ - cel.CostTrackerOptions( - interpreter.OverloadCostTracker("list_sets_contains_list", trackSetsCost(1)), - interpreter.OverloadCostTracker("list_sets_intersects_list", trackSetsCost(1)), - interpreter.OverloadCostTracker("list_sets_equivalent_list", trackSetsCost(2)), - ), - } + return []cel.ProgramOption{} } func setsIntersects(listA, listB ref.Val) ref.Val { @@ -152,46 +136,3 @@ func setsEquivalent(listA, listB ref.Val) ref.Val { } return setsContains(listB, listA) } - -func estimateSetsCost(costFactor float64) checker.FunctionEstimator { - return func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { - if len(args) == 2 { - arg0Size := estimateSize(estimator, args[0]) - arg1Size := estimateSize(estimator, args[1]) - costEstimate := arg0Size.Multiply(arg1Size).MultiplyByCostFactor(costFactor).Add(callCostEstimate) - return &checker.CallEstimate{CostEstimate: costEstimate} - } - return nil - } -} - -func estimateSize(estimator checker.CostEstimator, node checker.AstNode) checker.SizeEstimate { - if l := node.ComputedSize(); l != nil { - return *l - } - if l := estimator.EstimateSize(node); l != nil { - return *l - } - return checker.SizeEstimate{Min: 0, Max: math.MaxUint64} -} - -func trackSetsCost(costFactor float64) interpreter.FunctionTracker { - return func(args []ref.Val, _ ref.Val) *uint64 { - lhsSize := actualSize(args[0]) - rhsSize := actualSize(args[1]) - cost := callCost + uint64(float64(lhsSize*rhsSize)*costFactor) - return &cost - } -} - -func actualSize(value ref.Val) uint64 { - if sz, ok := value.(traits.Sizer); ok { - return uint64(sz.Size().(types.Int)) - } - return 1 -} - -var ( - callCostEstimate = checker.CostEstimate{Min: 1, Max: 1} - callCost = uint64(1) -) diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go index 88c119f2b0b..8455d582909 100644 --- a/vendor/github.com/google/cel-go/ext/strings.go +++ b/vendor/github.com/google/cel-go/ext/strings.go @@ -173,7 +173,7 @@ const ( // 'TacoCat'.lowerAscii() // returns 'tacocat' // 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii' // -// # Strings.Quote +// # Quote // // Introduced in version: 1 // @@ -301,28 +301,26 @@ func StringsLocale(locale string) StringsOption { } } -// StringsVersion configures the version of the string library. -// -// The version limits which functions are available. Only functions introduced -// below or equal to the given version included in the library. If this option -// is not set, all functions are available. -// -// See the library documentation to determine which version a function was introduced. -// If the documentation does not state which version a function was introduced, it can -// be assumed to be introduced at version 0, when the library was first created. -func StringsVersion(version uint32) StringsOption { - return func(lib *stringLib) *stringLib { - lib.version = version - return lib +// StringsVersion configures the version of the string library. The version limits which +// functions are available. Only functions introduced below or equal to the given +// version included in the library. See the library documentation to determine +// which version a function was introduced at. If the documentation does not +// state which version a function was introduced at, it can be assumed to be +// introduced at version 0, when the library was first created. 
+// If this option is not set, all functions are available. +func StringsVersion(version uint32) func(lib *stringLib) *stringLib { + return func(sl *stringLib) *stringLib { + sl.version = version + return sl } } // CompileOptions implements the Library interface method. -func (lib *stringLib) CompileOptions() []cel.EnvOption { +func (sl *stringLib) CompileOptions() []cel.EnvOption { formatLocale := "en_US" - if lib.locale != "" { + if sl.locale != "" { // ensure locale is properly-formed if set - _, err := language.Parse(lib.locale) + _, err := language.Parse(sl.locale) if err != nil { return []cel.EnvOption{ func(e *cel.Env) (*cel.Env, error) { @@ -330,7 +328,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { }, } } - formatLocale = lib.locale + formatLocale = sl.locale } opts := []cel.EnvOption{ @@ -434,7 +432,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { return stringOrError(upperASCII(string(s))) }))), } - if lib.version >= 1 { + if sl.version >= 1 { opts = append(opts, cel.Function("format", cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, cel.FunctionBinding(func(args ...ref.Val) ref.Val { @@ -449,7 +447,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption { })))) } - if lib.version >= 2 { + if sl.version >= 2 { opts = append(opts, cel.Function("join", cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel index 3a5219eb5f6..b6d04e00031 100644 --- a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel +++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -25,14 +25,13 @@ go_library( importpath = "github.com/google/cel-go/interpreter", deps = [ "//common:go_default_library", - "//common/ast:go_default_library", "//common/containers:go_default_library", - "//common/functions:go_default_library", "//common/operators:go_default_library", "//common/overloads:go_default_library", "//common/types:go_default_library", "//common/types/ref:go_default_library", "//common/types/traits:go_default_library", + "//interpreter/functions:go_default_library", "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", "@org_golang_google_protobuf//types/known/durationpb:go_default_library", @@ -57,13 +56,12 @@ go_test( ], deps = [ "//checker:go_default_library", + "//checker/decls:go_default_library", "//common/containers:go_default_library", "//common/debug:go_default_library", - "//common/decls:go_default_library", - "//common/functions:go_default_library", "//common/operators:go_default_library", - "//common/stdlib:go_default_library", "//common/types:go_default_library", + "//interpreter/functions:go_default_library", "//parser:go_default_library", "//test:go_default_library", "//test/proto2pb:go_default_library", diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go index a80264451c5..f82e4e9038b 100644 --- a/vendor/github.com/google/cel-go/interpreter/activation.go +++ b/vendor/github.com/google/cel-go/interpreter/activation.go @@ -58,7 +58,7 @@ func (emptyActivation) Parent() Activation { return nil } // The output of the lazy binding will overwrite the variable reference in the internal map. 
// // Values which are not represented as ref.Val types on input may be adapted to a ref.Val using -// the types.Adapter configured in the environment. +// the ref.TypeAdapter configured in the environment. func NewActivation(bindings any) (Activation, error) { if bindings == nil { return nil, errors.New("bindings must be non-nil") diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go index 1fbaaf17e20..afb7c8d5bf3 100644 --- a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -15,8 +15,6 @@ package interpreter import ( - "fmt" - "github.com/google/cel-go/common/containers" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" @@ -179,8 +177,8 @@ func numericValueEquals(value any, celValue ref.Val) bool { // NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing // AttributePattern matches with PartialActivation inputs. func NewPartialAttributeFactory(container *containers.Container, - adapter types.Adapter, - provider types.Provider) AttributeFactory { + adapter ref.TypeAdapter, + provider ref.TypeProvider) AttributeFactory { fac := NewAttributeFactory(container, adapter, provider) return &partialAttributeFactory{ AttributeFactory: fac, @@ -193,8 +191,8 @@ func NewPartialAttributeFactory(container *containers.Container, type partialAttributeFactory struct { AttributeFactory container *containers.Container - adapter types.Adapter - provider types.Provider + adapter ref.TypeAdapter + provider ref.TypeProvider } // AbsoluteAttribute implementation of the AttributeFactory interface which wraps the @@ -243,15 +241,12 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( vars PartialActivation, attrID int64, variableNames []string, - qualifiers []Qualifier) (*types.Unknown, error) { + qualifiers []Qualifier) (types.Unknown, error) { patterns := vars.UnknownAttributePatterns() candidateIndices := map[int]struct{}{} for _, variable := range variableNames { for i, pat := range patterns { if pat.VariableMatches(variable) { - if len(qualifiers) == 0 { - return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil - } candidateIndices[i] = struct{}{} } } @@ -260,6 +255,10 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( if len(candidateIndices) == 0 { return nil, nil } + // Determine whether to return early if there are no qualifiers. + if len(qualifiers) == 0 { + return types.Unknown{attrID}, nil + } // Resolve the attribute qualifiers into a static set. This prevents more dynamic // Attribute resolutions than necessary when there are multiple unknown patterns // that traverse the same Attribute-based qualifier field. 
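The hunk above reverts `matchesUnknownPatterns` to the older unknown representation, where `types.Unknown` is a plain slice of expression IDs rather than a struct carrying attribute trails. A minimal sketch of how such a value is built and detected, assuming only the vendored cel-go API shown in this patch (the ID value is illustrative, not taken from the patch):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	// In this older API an unknown is just the list of expression IDs
	// whose values could not be resolved, e.g. the attrID returned by
	// matchesUnknownPatterns above.
	unk := types.Unknown{42}

	// types.IsUnknown reports whether a ref.Val is an unknown marker,
	// which is how the qualifier and logic-operator code elsewhere in
	// this patch decides to propagate it.
	fmt.Println(types.IsUnknown(unk)) // true
}
```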
@@ -301,28 +300,7 @@ func (fac *partialAttributeFactory) matchesUnknownPatterns( } } if isUnk { - attr := types.NewAttributeTrail(pat.variable) - for i := 0; i < len(qualPats) && i < len(newQuals); i++ { - if qual, ok := newQuals[i].(ConstantQualifier); ok { - switch v := qual.Value().Value().(type) { - case bool: - types.QualifyAttribute[bool](attr, v) - case float64: - types.QualifyAttribute[int64](attr, int64(v)) - case int64: - types.QualifyAttribute[int64](attr, v) - case string: - types.QualifyAttribute[string](attr, v) - case uint64: - types.QualifyAttribute[uint64](attr, v) - default: - types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v)) - } - } else { - types.QualifyAttribute[string](attr, "*") - } - } - return types.NewUnknown(matchExprID, attr), nil + return types.Unknown{matchExprID}, nil } } return nil, nil diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go index ca97bdfcf14..1b19dc2b57b 100644 --- a/vendor/github.com/google/cel-go/interpreter/attributes.go +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -22,6 +22,8 @@ import ( "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) // AttributeFactory provides methods creating Attribute and Qualifier values. @@ -59,7 +61,7 @@ type AttributeFactory interface { // The qualifier may consider the object type being qualified, if present. If absent, the // qualification should be considered dynamic and the qualification should still work, though // it may be sub-optimal. - NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) + NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) } // Qualifier marker interface for designating different qualifier values and where they appear @@ -129,7 +131,7 @@ type NamespacedAttribute interface { // NewAttributeFactory returns a default AttributeFactory which is produces Attribute values // capable of resolving types by simple names and qualify the values using the supported qualifier // types: bool, int, string, and uint. -func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory { +func NewAttributeFactory(cont *containers.Container, a ref.TypeAdapter, p ref.TypeProvider) AttributeFactory { return &attrFactory{ container: cont, adapter: a, @@ -139,8 +141,8 @@ func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Pr type attrFactory struct { container *containers.Container - adapter types.Adapter - provider types.Provider + adapter ref.TypeAdapter + provider ref.TypeProvider } // AbsoluteAttribute refers to a variable value and an optional qualifier path. @@ -197,13 +199,13 @@ func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribu } // NewQualifier is an implementation of the AttributeFactory interface. -func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) { +func (r *attrFactory) NewQualifier(objType *exprpb.Type, qualID int64, val any, opt bool) (Qualifier, error) { // Before creating a new qualifier check to see if this is a protobuf message field access. // If so, use the precomputed GetFrom qualification method rather than the standard // stringQualifier. 
str, isStr := val.(string) - if isStr && objType != nil && objType.Kind() == types.StructKind { - ft, found := r.provider.FindStructFieldType(objType.TypeName(), str) + if isStr && objType != nil && objType.GetMessageType() != "" { + ft, found := r.provider.FindFieldType(objType.GetMessageType(), str) if found && ft.IsSet != nil && ft.GetFrom != nil { return &fieldQualifier{ id: qualID, @@ -223,8 +225,8 @@ type absoluteAttribute struct { // (package) of the expression. namespaceNames []string qualifiers []Qualifier - adapter types.Adapter - provider types.Provider + adapter ref.TypeAdapter + provider ref.TypeProvider fac AttributeFactory } @@ -323,7 +325,7 @@ type conditionalAttribute struct { expr Interpretable truthy Attribute falsy Attribute - adapter types.Adapter + adapter ref.TypeAdapter fac AttributeFactory } @@ -391,8 +393,8 @@ func (a *conditionalAttribute) String() string { type maybeAttribute struct { id int64 attrs []NamespacedAttribute - adapter types.Adapter - provider types.Provider + adapter ref.TypeAdapter + provider ref.TypeProvider fac AttributeFactory } @@ -509,7 +511,7 @@ type relativeAttribute struct { id int64 operand Interpretable qualifiers []Qualifier - adapter types.Adapter + adapter ref.TypeAdapter fac AttributeFactory } @@ -574,7 +576,7 @@ func (a *relativeAttribute) String() string { return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) } -func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) { +func newQualifier(adapter ref.TypeAdapter, id int64, v any, opt bool) (Qualifier, error) { var qual Qualifier switch val := v.(type) { case Attribute: @@ -655,7 +657,7 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, qual = &doubleQualifier{ id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt, } - case *types.Unknown: + case types.Unknown: qual = &unknownQualifier{id: id, value: val} default: if q, ok := v.(Qualifier); ok { @@ -687,7 +689,7 @@ type stringQualifier struct { id int64 value string celValue ref.Val - adapter types.Adapter + adapter ref.TypeAdapter optional bool } @@ -788,7 +790,7 @@ type intQualifier struct { id int64 value int64 celValue ref.Val - adapter types.Adapter + adapter ref.TypeAdapter optional bool } @@ -915,7 +917,7 @@ type uintQualifier struct { id int64 value uint64 celValue ref.Val - adapter types.Adapter + adapter ref.TypeAdapter optional bool } @@ -980,7 +982,7 @@ type boolQualifier struct { id int64 value bool celValue ref.Val - adapter types.Adapter + adapter ref.TypeAdapter optional bool } @@ -1033,8 +1035,8 @@ func (q *boolQualifier) Value() ref.Val { type fieldQualifier struct { id int64 Name string - FieldType *types.FieldType - adapter types.Adapter + FieldType *ref.FieldType + adapter ref.TypeAdapter optional bool } @@ -1092,7 +1094,7 @@ type doubleQualifier struct { id int64 value float64 celValue ref.Val - adapter types.Adapter + adapter ref.TypeAdapter optional bool } @@ -1129,7 +1131,7 @@ func (q *doubleQualifier) Value() ref.Val { // for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere. type unknownQualifier struct { id int64 - value *types.Unknown + value types.Unknown } // ID is an implementation of the Qualifier interface method. 
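These attribute hunks swap the factory and qualifier plumbing back to `ref.TypeAdapter`/`ref.TypeProvider` and `*exprpb.Type`. A minimal sketch of constructing an attribute factory against the reverted signature; `types.NewRegistry` and `containers.DefaultContainer` are assumed helpers from the vendored library, not part of this patch, and the qualifier values are illustrative:

```go
package main

import (
	"log"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	// The registry satisfies both ref.TypeAdapter and ref.TypeProvider,
	// matching the reverted NewAttributeFactory signature above.
	reg, err := types.NewRegistry()
	if err != nil {
		log.Fatal(err)
	}
	attrs := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)

	// Qualifiers are created from plain Go values; objType is nil here,
	// so the qualification is treated as dynamic rather than as a
	// protobuf message field access.
	q, err := attrs.NewQualifier(nil, 1, "spec", false)
	if err != nil {
		log.Fatal(err)
	}
	_ = q
}
```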
@@ -1223,10 +1225,10 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt // refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and // apply the qualifier with the option to presence test field accesses before retrieving field values. -func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { +func refQualify(adapter ref.TypeAdapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) { celVal := adapter.NativeToValue(obj) switch v := celVal.(type) { - case *types.Unknown: + case types.Unknown: return v, true, nil case *types.Err: return nil, false, v diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go index 502db35fc0f..208487b7d3b 100644 --- a/vendor/github.com/google/cel-go/interpreter/decorators.go +++ b/vendor/github.com/google/cel-go/interpreter/decorators.go @@ -75,13 +75,15 @@ func decDisableShortcircuits() InterpretableDecorator { switch expr := i.(type) { case *evalOr: return &evalExhaustiveOr{ - id: expr.id, - terms: expr.terms, + id: expr.id, + lhs: expr.lhs, + rhs: expr.rhs, }, nil case *evalAnd: return &evalExhaustiveAnd{ - id: expr.id, - terms: expr.terms, + id: expr.id, + lhs: expr.lhs, + rhs: expr.rhs, }, nil case *evalFold: expr.exhaustive = true diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go index 8f0bdb7b8ee..febf9d8a834 100644 --- a/vendor/github.com/google/cel-go/interpreter/dispatcher.go +++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go @@ -17,7 +17,7 @@ package interpreter import ( "fmt" - "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/interpreter/functions" ) // Dispatcher resolves function calls to their appropriate overload. diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go index 4bdd1fdc732..cc0d3e6f948 100644 --- a/vendor/github.com/google/cel-go/interpreter/evalstate.go +++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go @@ -66,11 +66,7 @@ func (s *evalState) Value(exprID int64) (ref.Val, bool) { // SetValue is an implementation of the EvalState interface method. func (s *evalState) SetValue(exprID int64, val ref.Val) { - if val == nil { - delete(s.values, exprID) - } else { - s.values[exprID] = val - } + s.values[exprID] = val } // Reset implements the EvalState interface method. diff --git a/vendor/github.com/google/cel-go/interpreter/formatting.go b/vendor/github.com/google/cel-go/interpreter/formatting.go index e3f7533745e..6a98f6fa564 100644 --- a/vendor/github.com/google/cel-go/interpreter/formatting.go +++ b/vendor/github.com/google/cel-go/interpreter/formatting.go @@ -25,7 +25,7 @@ import ( "github.com/google/cel-go/common/types/ref" ) -type typeVerifier func(int64, ...ref.Type) (bool, error) +type typeVerifier func(int64, ...*types.TypeValue) (bool, error) // InterpolateFormattedString checks the syntax and cardinality of any string.format calls present in the expression and reports // any errors at compile time. 
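The dispatcher hunk above restores the `interpreter/functions` import, and the hunks that follow re-introduce the local `functions.Overload` struct plus `StandardOverloads()`. A minimal sketch of registering the standard overloads alongside a custom unary function on a dispatcher; the `shout_string` overload is purely illustrative:

```go
package main

import (
	"log"
	"strings"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
	"github.com/google/cel-go/interpreter/functions"
)

func main() {
	disp := interpreter.NewDispatcher()

	// Standard CEL operators (&&, +, size, type conversions, ...) as
	// re-introduced by standard.go in this patch.
	if err := disp.Add(functions.StandardOverloads()...); err != nil {
		log.Fatal(err)
	}

	// A custom overload uses the same Overload struct restored above.
	shout := &functions.Overload{
		Operator: "shout_string",
		Unary: func(val ref.Val) ref.Val {
			s, ok := val.(types.String)
			if !ok {
				return types.ValOrErr(val, "no such overload")
			}
			return types.String(strings.ToUpper(string(s)))
		},
	}
	if err := disp.Add(shout); err != nil {
		log.Fatal(err)
	}
}
```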
diff --git a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel index 4a80c3ea085..846d11bf470 100644 --- a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel +++ b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel @@ -7,11 +7,16 @@ package( go_library( name = "go_default_library", - srcs = [ + srcs = [ "functions.go", + "standard.go", ], importpath = "github.com/google/cel-go/interpreter/functions", deps = [ - "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", ], ) diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go index 21ffb69246a..9816017522f 100644 --- a/vendor/github.com/google/cel-go/interpreter/functions/functions.go +++ b/vendor/github.com/google/cel-go/interpreter/functions/functions.go @@ -16,7 +16,7 @@ // interpreter and as declared within the checker#StandardDeclarations. package functions -import fn "github.com/google/cel-go/common/functions" +import "github.com/google/cel-go/common/types/ref" // Overload defines a named overload of a function, indicating an operand trait // which must be present on the first argument to the overload as well as one @@ -26,14 +26,37 @@ import fn "github.com/google/cel-go/common/functions" // and the specializations simplify the call contract for implementers of // types with operator overloads. Any added complexity is assumed to be handled // by the generic FunctionOp. -type Overload = fn.Overload +type Overload struct { + // Operator name as written in an expression or defined within + // operators.go. + Operator string + + // Operand trait used to dispatch the call. The zero-value indicates a + // global function overload or that one of the Unary / Binary / Function + // definitions should be used to execute the call. + OperandTrait int + + // Unary defines the overload with a UnaryOp implementation. May be nil. + Unary UnaryOp + + // Binary defines the overload with a BinaryOp implementation. May be nil. + Binary BinaryOp + + // Function defines the overload with a FunctionOp implementation. May be + // nil. + Function FunctionOp + + // NonStrict specifies whether the Overload will tolerate arguments that + // are types.Err or types.Unknown. + NonStrict bool +} // UnaryOp is a function that takes a single value and produces an output. -type UnaryOp = fn.UnaryOp +type UnaryOp func(value ref.Val) ref.Val // BinaryOp is a function that takes two values and produces an output. -type BinaryOp = fn.BinaryOp +type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val // FunctionOp is a function with accepts zero or more arguments and produces // a value or error as a result. -type FunctionOp = fn.FunctionOp +type FunctionOp func(values ...ref.Val) ref.Val diff --git a/vendor/github.com/google/cel-go/interpreter/functions/standard.go b/vendor/github.com/google/cel-go/interpreter/functions/standard.go new file mode 100644 index 00000000000..73e936114f2 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/functions/standard.go @@ -0,0 +1,270 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package functions + +import ( + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// StandardOverloads returns the definitions of the built-in overloads. +func StandardOverloads() []*Overload { + return []*Overload{ + // Logical not (!a) + { + Operator: operators.LogicalNot, + OperandTrait: traits.NegatorType, + Unary: func(value ref.Val) ref.Val { + if !types.IsBool(value) { + return types.ValOrErr(value, "no such overload") + } + return value.(traits.Negater).Negate() + }}, + // Not strictly false: IsBool(a) ? a : true + { + Operator: operators.NotStrictlyFalse, + Unary: notStrictlyFalse}, + // Deprecated: not strictly false, may be overridden in the environment. + { + Operator: operators.OldNotStrictlyFalse, + Unary: notStrictlyFalse}, + + // Less than operator + {Operator: operators.Less, + OperandTrait: traits.ComparerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne { + return types.True + } + if cmp == types.IntOne || cmp == types.IntZero { + return types.False + } + return cmp + }}, + + // Less than or equal operator + {Operator: operators.LessEquals, + OperandTrait: traits.ComparerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntOne { + return types.False + } + return cmp + }}, + + // Greater than operator + {Operator: operators.Greater, + OperandTrait: traits.ComparerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne { + return types.True + } + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.False + } + return cmp + }}, + + // Greater than equal operators + {Operator: operators.GreaterEquals, + OperandTrait: traits.ComparerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntNegOne { + return types.False + } + return cmp + }}, + + // Add operator + {Operator: operators.Add, + OperandTrait: traits.AdderType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Adder).Add(rhs) + }}, + + // Subtract operators + {Operator: operators.Subtract, + OperandTrait: traits.SubtractorType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Subtractor).Subtract(rhs) + }}, + + // Multiply operator + {Operator: operators.Multiply, + OperandTrait: traits.MultiplierType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Multiplier).Multiply(rhs) + }}, + + // Divide operator + {Operator: operators.Divide, + OperandTrait: traits.DividerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Divider).Divide(rhs) + }}, + + // Modulo operator + {Operator: 
operators.Modulo, + OperandTrait: traits.ModderType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Modder).Modulo(rhs) + }}, + + // Negate operator + {Operator: operators.Negate, + OperandTrait: traits.NegatorType, + Unary: func(value ref.Val) ref.Val { + if types.IsBool(value) { + return types.ValOrErr(value, "no such overload") + } + return value.(traits.Negater).Negate() + }}, + + // Index operator + {Operator: operators.Index, + OperandTrait: traits.IndexerType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Indexer).Get(rhs) + }}, + + // Size function + {Operator: overloads.Size, + OperandTrait: traits.SizerType, + Unary: func(value ref.Val) ref.Val { + return value.(traits.Sizer).Size() + }}, + + // In operator + {Operator: operators.In, Binary: inAggregate}, + // Deprecated: in operator, may be overridden in the environment. + {Operator: operators.OldIn, Binary: inAggregate}, + + // Matches function + {Operator: overloads.Matches, + OperandTrait: traits.MatcherType, + Binary: func(lhs ref.Val, rhs ref.Val) ref.Val { + return lhs.(traits.Matcher).Match(rhs) + }}, + + // Type conversion functions + // TODO: verify type conversion safety of numeric values. + + // Int conversions. + {Operator: overloads.TypeConvertInt, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.IntType) + }}, + + // Uint conversions. + {Operator: overloads.TypeConvertUint, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.UintType) + }}, + + // Double conversions. + {Operator: overloads.TypeConvertDouble, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.DoubleType) + }}, + + // Bool conversions. + {Operator: overloads.TypeConvertBool, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.BoolType) + }}, + + // Bytes conversions. + {Operator: overloads.TypeConvertBytes, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.BytesType) + }}, + + // String conversions. + {Operator: overloads.TypeConvertString, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.StringType) + }}, + + // Timestamp conversions. + {Operator: overloads.TypeConvertTimestamp, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.TimestampType) + }}, + + // Duration conversions. + {Operator: overloads.TypeConvertDuration, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.DurationType) + }}, + + // Type operations. + {Operator: overloads.TypeConvertType, + Unary: func(value ref.Val) ref.Val { + return value.ConvertToType(types.TypeType) + }}, + + // Dyn conversion (identity function). 
+ {Operator: overloads.TypeConvertDyn, + Unary: func(value ref.Val) ref.Val { + return value + }}, + + {Operator: overloads.Iterator, + OperandTrait: traits.IterableType, + Unary: func(value ref.Val) ref.Val { + return value.(traits.Iterable).Iterator() + }}, + + {Operator: overloads.HasNext, + OperandTrait: traits.IteratorType, + Unary: func(value ref.Val) ref.Val { + return value.(traits.Iterator).HasNext() + }}, + + {Operator: overloads.Next, + OperandTrait: traits.IteratorType, + Unary: func(value ref.Val) ref.Val { + return value.(traits.Iterator).Next() + }}, + } + +} + +func notStrictlyFalse(value ref.Val) ref.Val { + if types.IsBool(value) { + return value + } + return types.True +} + +func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { + if rhs.Type().HasTrait(traits.ContainerType) { + return rhs.(traits.Container).Contains(lhs) + } + return types.ValOrErr(rhs, "no such overload") +} diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go index c4598dfa73b..32e2bcb7dea 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpretable.go +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -17,12 +17,12 @@ package interpreter import ( "fmt" - "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" "github.com/google/cel-go/common/overloads" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter/functions" ) // Interpretable can accept a given Activation and produce a value along with @@ -52,7 +52,7 @@ type InterpretableAttribute interface { Attr() Attribute // Adapter returns the type adapter to be used for adapting resolved Attribute values. - Adapter() types.Adapter + Adapter() ref.TypeAdapter // AddQualifier proxies the Attribute.AddQualifier method. // @@ -202,8 +202,9 @@ func (cons *evalConst) Value() ref.Val { } type evalOr struct { - id int64 - terms []Interpretable + id int64 + lhs Interpretable + rhs Interpretable } // ID implements the Interpretable interface method. @@ -213,39 +214,41 @@ func (or *evalOr) ID() int64 { // Eval implements the Interpretable interface method. func (or *evalOr) Eval(ctx Activation) ref.Val { - var err ref.Val = nil - var unk *types.Unknown - for _, term := range or.terms { - val := term.Eval(ctx) - boolVal, ok := val.(types.Bool) - // short-circuit on true. - if ok && boolVal == types.True { - return types.True - } - if !ok { - isUnk := false - unk, isUnk = types.MaybeMergeUnknowns(val, unk) - if !isUnk && err == nil { - if types.IsError(val) { - err = val - } else { - err = types.MaybeNoSuchOverloadErr(val) - } - } - } + // short-circuit lhs. + lVal := or.lhs.Eval(ctx) + lBool, lok := lVal.(types.Bool) + if lok && lBool == types.True { + return types.True } - if unk != nil { - return unk + // short-circuit on rhs. + rVal := or.rhs.Eval(ctx) + rBool, rok := rVal.(types.Bool) + if rok && rBool == types.True { + return types.True } - if err != nil { - return err + // return if both sides are bool false. + if lok && rok { + return types.False } - return types.False + // TODO: return both values as a set if both are unknown or error. + // prefer left unknown to right unknown. + if types.IsUnknown(lVal) { + return lVal + } + if types.IsUnknown(rVal) { + return rVal + } + // If the left-hand side is non-boolean return it as the error. 
+ if types.IsError(lVal) { + return lVal + } + return types.ValOrErr(rVal, "no such overload") } type evalAnd struct { - id int64 - terms []Interpretable + id int64 + lhs Interpretable + rhs Interpretable } // ID implements the Interpretable interface method. @@ -255,34 +258,35 @@ func (and *evalAnd) ID() int64 { // Eval implements the Interpretable interface method. func (and *evalAnd) Eval(ctx Activation) ref.Val { - var err ref.Val = nil - var unk *types.Unknown - for _, term := range and.terms { - val := term.Eval(ctx) - boolVal, ok := val.(types.Bool) - // short-circuit on false. - if ok && boolVal == types.False { - return types.False - } - if !ok { - isUnk := false - unk, isUnk = types.MaybeMergeUnknowns(val, unk) - if !isUnk && err == nil { - if types.IsError(val) { - err = val - } else { - err = types.MaybeNoSuchOverloadErr(val) - } - } - } + // short-circuit lhs. + lVal := and.lhs.Eval(ctx) + lBool, lok := lVal.(types.Bool) + if lok && lBool == types.False { + return types.False } - if unk != nil { - return unk + // short-circuit on rhs. + rVal := and.rhs.Eval(ctx) + rBool, rok := rVal.(types.Bool) + if rok && rBool == types.False { + return types.False } - if err != nil { - return err + // return if both sides are bool true. + if lok && rok { + return types.True + } + // TODO: return both values as a set if both are unknown or error. + // prefer left unknown to right unknown. + if types.IsUnknown(lVal) { + return lVal + } + if types.IsUnknown(rVal) { + return rVal } - return types.True + // If the left-hand side is non-boolean return it as the error. + if types.IsError(lVal) { + return lVal + } + return types.ValOrErr(rVal, "no such overload") } type evalEq struct { @@ -575,7 +579,7 @@ type evalList struct { elems []Interpretable optionals []bool hasOptionals bool - adapter types.Adapter + adapter ref.TypeAdapter } // ID implements the Interpretable interface method. @@ -621,7 +625,7 @@ type evalMap struct { vals []Interpretable optionals []bool hasOptionals bool - adapter types.Adapter + adapter ref.TypeAdapter } // ID implements the Interpretable interface method. @@ -685,7 +689,7 @@ type evalObj struct { vals []Interpretable optionals []bool hasOptionals bool - provider types.Provider + provider ref.TypeProvider } // ID implements the Interpretable interface method. @@ -735,7 +739,7 @@ type evalFold struct { cond Interpretable step Interpretable result Interpretable - adapter types.Adapter + adapter ref.TypeAdapter exhaustive bool interruptable bool } @@ -861,40 +865,18 @@ type evalWatchAttr struct { // AddQualifier creates a wrapper over the incoming qualifier which observes the qualification // result. func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) { - switch qual := q.(type) { - // By default, the qualifier is either a constant or an attribute - // There may be some custom cases where the attribute is neither. - case ConstantQualifier: - // Expose a method to test whether the qualifier matches the input pattern. + cq, isConst := q.(ConstantQualifier) + if isConst { q = &evalWatchConstQual{ - ConstantQualifier: qual, + ConstantQualifier: cq, observer: e.observer, - adapter: e.Adapter(), - } - case *evalWatchAttr: - // Unwrap the evalWatchAttr since the observation will be applied during Qualify or - // QualifyIfPresent rather than Eval. 
- q = &evalWatchAttrQual{ - Attribute: qual.InterpretableAttribute, - observer: e.observer, - adapter: e.Adapter(), - } - case Attribute: - // Expose methods which intercept the qualification prior to being applied as a qualifier. - // Using this interface ensures that the qualifier is converted to a constant value one - // time during attribute pattern matching as the method embeds the Attribute interface - // needed to trip the conversion to a constant. - q = &evalWatchAttrQual{ - Attribute: qual, - observer: e.observer, - adapter: e.Adapter(), + adapter: e.InterpretableAttribute.Adapter(), } - default: - // This is likely a custom qualifier type. + } else { q = &evalWatchQual{ - Qualifier: qual, + Qualifier: q, observer: e.observer, - adapter: e.Adapter(), + adapter: e.InterpretableAttribute.Adapter(), } } _, err := e.InterpretableAttribute.AddQualifier(q) @@ -913,7 +895,7 @@ func (e *evalWatchAttr) Eval(vars Activation) ref.Val { type evalWatchConstQual struct { ConstantQualifier observer EvalObserver - adapter types.Adapter + adapter ref.TypeAdapter } // Qualify observes the qualification of a object via a constant boolean, int, string, or uint. @@ -952,48 +934,11 @@ func (e *evalWatchConstQual) QualifierValueEquals(value any) bool { return ok && qve.QualifierValueEquals(value) } -// evalWatchAttrQual observes the qualification of an object by a value computed at runtime. -type evalWatchAttrQual struct { - Attribute - observer EvalObserver - adapter ref.TypeAdapter -} - -// Qualify observes the qualification of a object via a value computed at runtime. -func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) { - out, err := e.Attribute.Qualify(vars, obj) - var val ref.Val - if err != nil { - val = types.WrapErr(err) - } else { - val = e.adapter.NativeToValue(out) - } - e.observer(e.ID(), e.Attribute, val) - return out, err -} - -// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. -func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { - out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly) - var val ref.Val - if err != nil { - val = types.WrapErr(err) - } else if out != nil { - val = e.adapter.NativeToValue(out) - } else if presenceOnly { - val = types.Bool(present) - } - if present || presenceOnly { - e.observer(e.ID(), e.Attribute, val) - } - return out, present, err -} - // evalWatchQual observes the qualification of an object by a value computed at runtime. type evalWatchQual struct { Qualifier observer EvalObserver - adapter types.Adapter + adapter ref.TypeAdapter } // Qualify observes the qualification of a object via a value computed at runtime. @@ -1041,8 +986,9 @@ func (e *evalWatchConst) Eval(vars Activation) ref.Val { // evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation. type evalExhaustiveOr struct { - id int64 - terms []Interpretable + id int64 + lhs Interpretable + rhs Interpretable } // ID implements the Interpretable interface method. @@ -1052,44 +998,38 @@ func (or *evalExhaustiveOr) ID() int64 { // Eval implements the Interpretable interface method. 
func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val { - var err ref.Val = nil - var unk *types.Unknown - isTrue := false - for _, term := range or.terms { - val := term.Eval(ctx) - boolVal, ok := val.(types.Bool) - // flag the result as true - if ok && boolVal == types.True { - isTrue = true - } - if !ok && !isTrue { - isUnk := false - unk, isUnk = types.MaybeMergeUnknowns(val, unk) - if !isUnk && err == nil { - if types.IsError(val) { - err = val - } else { - err = types.MaybeNoSuchOverloadErr(val) - } - } - } + lVal := or.lhs.Eval(ctx) + rVal := or.rhs.Eval(ctx) + lBool, lok := lVal.(types.Bool) + if lok && lBool == types.True { + return types.True } - if isTrue { + rBool, rok := rVal.(types.Bool) + if rok && rBool == types.True { return types.True } - if unk != nil { - return unk + if lok && rok { + return types.False } - if err != nil { - return err + if types.IsUnknown(lVal) { + return lVal } - return types.False + if types.IsUnknown(rVal) { + return rVal + } + // TODO: Combine the errors into a set in the future. + // If the left-hand side is non-boolean return it as the error. + if types.IsError(lVal) { + return lVal + } + return types.MaybeNoSuchOverloadErr(rVal) } // evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation. type evalExhaustiveAnd struct { - id int64 - terms []Interpretable + id int64 + lhs Interpretable + rhs Interpretable } // ID implements the Interpretable interface method. @@ -1099,45 +1039,38 @@ func (and *evalExhaustiveAnd) ID() int64 { // Eval implements the Interpretable interface method. func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val { - var err ref.Val = nil - var unk *types.Unknown - isFalse := false - for _, term := range and.terms { - val := term.Eval(ctx) - boolVal, ok := val.(types.Bool) - // short-circuit on false. - if ok && boolVal == types.False { - isFalse = true - } - if !ok && !isFalse { - isUnk := false - unk, isUnk = types.MaybeMergeUnknowns(val, unk) - if !isUnk && err == nil { - if types.IsError(val) { - err = val - } else { - err = types.MaybeNoSuchOverloadErr(val) - } - } - } + lVal := and.lhs.Eval(ctx) + rVal := and.rhs.Eval(ctx) + lBool, lok := lVal.(types.Bool) + if lok && lBool == types.False { + return types.False } - if isFalse { + rBool, rok := rVal.(types.Bool) + if rok && rBool == types.False { return types.False } - if unk != nil { - return unk + if lok && rok { + return types.True } - if err != nil { - return err + if types.IsUnknown(lVal) { + return lVal + } + if types.IsUnknown(rVal) { + return rVal } - return types.True + // TODO: Combine the errors into a set in the future. + // If the left-hand side is non-boolean return it as the error. + if types.IsError(lVal) { + return lVal + } + return types.MaybeNoSuchOverloadErr(rVal) } // evalExhaustiveConditional is like evalConditional, but does not short-circuit argument // evaluation. type evalExhaustiveConditional struct { id int64 - adapter types.Adapter + adapter ref.TypeAdapter attr *conditionalAttribute } @@ -1169,7 +1102,7 @@ func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val { // evalAttr evaluates an Attribute value. type evalAttr struct { - adapter types.Adapter + adapter ref.TypeAdapter attr Attribute optional bool } @@ -1194,7 +1127,7 @@ func (a *evalAttr) Attr() Attribute { } // Adapter implements the InterpretableAttribute interface method. 
-func (a *evalAttr) Adapter() types.Adapter { +func (a *evalAttr) Adapter() ref.TypeAdapter { return a.adapter } diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go index 00fc74732c6..707a6105a1c 100644 --- a/vendor/github.com/google/cel-go/interpreter/interpreter.go +++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -18,10 +18,9 @@ package interpreter import ( - "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -30,7 +29,7 @@ import ( type Interpreter interface { // NewInterpretable creates an Interpretable from a checked expression and an // optional list of InterpretableDecorator values. - NewInterpretable(checked *ast.CheckedAST, decorators ...InterpretableDecorator) (Interpretable, error) + NewInterpretable(checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) (Interpretable, error) // NewUncheckedInterpretable returns an Interpretable from a parsed expression // and an optional list of InterpretableDecorator values. @@ -155,8 +154,8 @@ func CompileRegexConstants(regexOptimizations ...*RegexOptimization) Interpretab type exprInterpreter struct { dispatcher Dispatcher container *containers.Container - provider types.Provider - adapter types.Adapter + provider ref.TypeProvider + adapter ref.TypeAdapter attrFactory AttributeFactory } @@ -164,8 +163,8 @@ type exprInterpreter struct { // throughout the Eval of all Interpretable instances generated from it. func NewInterpreter(dispatcher Dispatcher, container *containers.Container, - provider types.Provider, - adapter types.Adapter, + provider ref.TypeProvider, + adapter ref.TypeAdapter, attrFactory AttributeFactory) Interpreter { return &exprInterpreter{ dispatcher: dispatcher, @@ -175,9 +174,20 @@ func NewInterpreter(dispatcher Dispatcher, attrFactory: attrFactory} } +// NewStandardInterpreter builds a Dispatcher and TypeProvider with support for all of the CEL +// builtins defined in the language definition. +func NewStandardInterpreter(container *containers.Container, + provider ref.TypeProvider, + adapter ref.TypeAdapter, + resolver AttributeFactory) Interpreter { + dispatcher := NewDispatcher() + dispatcher.Add(functions.StandardOverloads()...) + return NewInterpreter(dispatcher, container, provider, adapter, resolver) +} + // NewIntepretable implements the Interpreter interface method. func (i *exprInterpreter) NewInterpretable( - checked *ast.CheckedAST, + checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) (Interpretable, error) { p := newPlanner( i.dispatcher, @@ -187,7 +197,7 @@ func (i *exprInterpreter) NewInterpretable( i.container, checked, decorators...) - return p.Plan(checked.Expr) + return p.Plan(checked.GetExpr()) } // NewUncheckedIntepretable implements the Interpreter interface method. 
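This hunk restores `NewStandardInterpreter` and the `*exprpb.CheckedExpr`-based `NewInterpretable`. A minimal sketch of the wiring, written as a helper that takes an already-checked expression (producing one via the parser/checker is out of scope here); the package and function names are illustrative, and it assumes the default container and registry:

```go
package evalx

import (
	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// evalChecked plans and evaluates a checked expression against the given
// variable bindings using the restored exprpb-based interpreter API.
func evalChecked(checked *exprpb.CheckedExpr, vars map[string]any) (ref.Val, error) {
	reg, err := types.NewRegistry()
	if err != nil {
		return nil, err
	}
	cont := containers.DefaultContainer
	attrs := interpreter.NewAttributeFactory(cont, reg, reg)

	// NewStandardInterpreter wires a dispatcher preloaded with
	// functions.StandardOverloads(), per the hunk above.
	interp := interpreter.NewStandardInterpreter(cont, reg, reg, attrs)

	prg, err := interp.NewInterpretable(checked)
	if err != nil {
		return nil, err
	}
	act, err := interpreter.NewActivation(vars)
	if err != nil {
		return nil, err
	}
	return prg.Eval(act), nil
}
```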
diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go index 757cd080e5c..0b65d0fa90d 100644 --- a/vendor/github.com/google/cel-go/interpreter/planner.go +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -18,12 +18,10 @@ import ( "fmt" "strings" - "github.com/google/cel-go/common/ast" "github.com/google/cel-go/common/containers" - "github.com/google/cel-go/common/functions" "github.com/google/cel-go/common/operators" - "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter/functions" exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" ) @@ -39,11 +37,11 @@ type interpretablePlanner interface { // functions, types, and namespaced identifiers at plan time rather than at runtime since // it only needs to be done once and may be semi-expensive to compute. func newPlanner(disp Dispatcher, - provider types.Provider, - adapter types.Adapter, + provider ref.TypeProvider, + adapter ref.TypeAdapter, attrFactory AttributeFactory, cont *containers.Container, - checked *ast.CheckedAST, + checked *exprpb.CheckedExpr, decorators ...InterpretableDecorator) interpretablePlanner { return &planner{ disp: disp, @@ -51,8 +49,8 @@ func newPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: checked.ReferenceMap, - typeMap: checked.TypeMap, + refMap: checked.GetReferenceMap(), + typeMap: checked.GetTypeMap(), decorators: decorators, } } @@ -61,8 +59,8 @@ func newPlanner(disp Dispatcher, // TypeAdapter, and Container to resolve functions and types at plan time. Namespaces present in // Select expressions are resolved lazily at evaluation time. func newUncheckedPlanner(disp Dispatcher, - provider types.Provider, - adapter types.Adapter, + provider ref.TypeProvider, + adapter ref.TypeAdapter, attrFactory AttributeFactory, cont *containers.Container, decorators ...InterpretableDecorator) interpretablePlanner { @@ -72,8 +70,8 @@ func newUncheckedPlanner(disp Dispatcher, adapter: adapter, attrFactory: attrFactory, container: cont, - refMap: make(map[int64]*ast.ReferenceInfo), - typeMap: make(map[int64]*types.Type), + refMap: make(map[int64]*exprpb.Reference), + typeMap: make(map[int64]*exprpb.Type), decorators: decorators, } } @@ -81,12 +79,12 @@ func newUncheckedPlanner(disp Dispatcher, // planner is an implementation of the interpretablePlanner interface. type planner struct { disp Dispatcher - provider types.Provider - adapter types.Adapter + provider ref.TypeProvider + adapter ref.TypeAdapter attrFactory AttributeFactory container *containers.Container - refMap map[int64]*ast.ReferenceInfo - typeMap map[int64]*types.Type + refMap map[int64]*exprpb.Reference + typeMap map[int64]*exprpb.Type decorators []InterpretableDecorator } @@ -145,19 +143,22 @@ func (p *planner) planIdent(expr *exprpb.Expr) (Interpretable, error) { }, nil } -func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { +func (p *planner) planCheckedIdent(id int64, identRef *exprpb.Reference) (Interpretable, error) { // Plan a constant reference if this is the case for this simple identifier. - if identRef.Value != nil { - return NewConstValue(id, identRef.Value), nil + if identRef.GetValue() != nil { + return p.Plan(&exprpb.Expr{Id: id, + ExprKind: &exprpb.Expr_ConstExpr{ + ConstExpr: identRef.GetValue(), + }}) } // Check to see whether the type map indicates this is a type name. 
All types should be // registered with the provider. cType := p.typeMap[id] - if cType.Kind() == types.TypeKind { - cVal, found := p.provider.FindIdent(identRef.Name) + if cType.GetType() != nil { + cVal, found := p.provider.FindIdent(identRef.GetName()) if !found { - return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name) + return nil, fmt.Errorf("reference to undefined type: %s", identRef.GetName()) } return NewConstValue(id, cVal), nil } @@ -165,7 +166,7 @@ func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Inter // Otherwise, return the attribute for the resolved identifier name. return &evalAttr{ adapter: p.adapter, - attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name), + attr: p.attrFactory.AbsoluteAttribute(id, identRef.GetName()), }, nil } @@ -428,16 +429,18 @@ func (p *planner) planCallNotEqual(expr *exprpb.Expr, args []Interpretable) (Int // planCallLogicalAnd generates a logical and (&&) Interpretable. func (p *planner) planCallLogicalAnd(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { return &evalAnd{ - id: expr.GetId(), - terms: args, + id: expr.GetId(), + lhs: args[0], + rhs: args[1], }, nil } // planCallLogicalOr generates a logical or (||) Interpretable. func (p *planner) planCallLogicalOr(expr *exprpb.Expr, args []Interpretable) (Interpretable, error) { return &evalOr{ - id: expr.GetId(), - terms: args, + id: expr.GetId(), + lhs: args[0], + rhs: args[1], }, nil } @@ -473,7 +476,7 @@ func (p *planner) planCallConditional(expr *exprpb.Expr, args []Interpretable) ( func (p *planner) planCallIndex(expr *exprpb.Expr, args []Interpretable, optional bool) (Interpretable, error) { op := args[0] ind := args[1] - opType := p.typeMap[op.ID()] + opType := p.typeMap[expr.GetCallExpr().GetTarget().GetId()] // Establish the attribute reference. var err error @@ -672,7 +675,7 @@ func (p *planner) constValue(c *exprpb.Constant) (ref.Val, error) { // namespace resolution rules to it in a scan over possible matching types in the TypeProvider. func (p *planner) resolveTypeName(typeName string) (string, bool) { for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { - if _, found := p.provider.FindStructType(qualifiedTypeName); found { + if _, found := p.provider.FindType(qualifiedTypeName); found { return qualifiedTypeName, true } } @@ -699,8 +702,8 @@ func (p *planner) resolveFunction(expr *exprpb.Expr) (*exprpb.Expr, string, stri // function name as the fnName value. 
oRef, hasOverload := p.refMap[expr.GetId()] if hasOverload { - if len(oRef.OverloadIDs) == 1 { - return target, fnName, oRef.OverloadIDs[0] + if len(oRef.GetOverloadId()) == 1 { + return target, fnName, oRef.GetOverloadId()[0] } // Note, this namespaced function name will not appear as a fully qualified name in ASTs // built and stored before cel-go v0.5.0; however, this functionality did not work at all diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go index b8834b1cb85..d1b5d6bd6bf 100644 --- a/vendor/github.com/google/cel-go/interpreter/prune.go +++ b/vendor/github.com/google/cel-go/interpreter/prune.go @@ -341,11 +341,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { } } if macro, found := p.macroCalls[node.GetId()]; found { - // Ensure that intermediate values for the comprehension are cleared during pruning - compre := node.GetComprehensionExpr() - if compre != nil { - visit(macro, clearIterVarVisitor(compre.IterVar, p.state)) - } // prune the expression in terms of the macro call instead of the expanded form. if newMacro, pruned := p.prune(macro); pruned { p.macroCalls[node.GetId()] = newMacro @@ -493,27 +488,6 @@ func (p *astPruner) prune(node *exprpb.Expr) (*exprpb.Expr, bool) { }, }, true } - case *exprpb.Expr_ComprehensionExpr: - compre := node.GetComprehensionExpr() - // Only the range of the comprehension is pruned since the state tracking only records - // the last iteration of the comprehension and not each step in the evaluation which - // means that the any residuals computed in between might be inaccurate. - if newRange, pruned := p.maybePrune(compre.GetIterRange()); pruned { - return &exprpb.Expr{ - Id: node.GetId(), - ExprKind: &exprpb.Expr_ComprehensionExpr{ - ComprehensionExpr: &exprpb.Expr_Comprehension{ - IterVar: compre.GetIterVar(), - IterRange: newRange, - AccuVar: compre.GetAccuVar(), - AccuInit: compre.GetAccuInit(), - LoopCondition: compre.GetLoopCondition(), - LoopStep: compre.GetLoopStep(), - Result: compre.GetResult(), - }, - }, - }, true - } } return node, false } @@ -550,17 +524,6 @@ func getMaxID(expr *exprpb.Expr) int64 { return maxID } -func clearIterVarVisitor(varName string, state EvalState) astVisitor { - return astVisitor{ - visitExpr: func(e *exprpb.Expr) { - ident := e.GetIdentExpr() - if ident != nil && ident.GetName() == varName { - state.SetValue(e.GetId(), nil) - } - }, - } -} - func maxIDVisitor(maxID *int64) astVisitor { return astVisitor{ visitExpr: func(e *exprpb.Expr) { @@ -580,9 +543,7 @@ func visit(expr *exprpb.Expr, visitor astVisitor) { exprs := []*exprpb.Expr{expr} for len(exprs) != 0 { e := exprs[0] - if visitor.visitExpr != nil { - visitor.visitExpr(e) - } + visitor.visitExpr(e) exprs = exprs[1:] switch e.GetExprKind().(type) { case *exprpb.Expr_SelectExpr: @@ -606,9 +567,7 @@ func visit(expr *exprpb.Expr, visitor astVisitor) { exprs = append(exprs, list.GetElements()...) 
case *exprpb.Expr_StructExpr: for _, entry := range e.GetStructExpr().GetEntries() { - if visitor.visitEntry != nil { - visitor.visitEntry(entry) - } + visitor.visitEntry(entry) if entry.GetMapKey() != nil { exprs = append(exprs, entry.GetMapKey()) } diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go index b9b307c1559..80e7f613449 100644 --- a/vendor/github.com/google/cel-go/interpreter/runtimecost.go +++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go @@ -65,21 +65,13 @@ func CostObserver(tracker *CostTracker) EvalObserver { // While the field names are identical, the boolean operation eval structs do not share an interface and so // must be handled individually. case *evalOr: - for _, term := range t.terms { - tracker.stack.drop(term.ID()) - } + tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) case *evalAnd: - for _, term := range t.terms { - tracker.stack.drop(term.ID()) - } + tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) case *evalExhaustiveOr: - for _, term := range t.terms { - tracker.stack.drop(term.ID()) - } + tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) case *evalExhaustiveAnd: - for _, term := range t.terms { - tracker.stack.drop(term.ID()) - } + tracker.stack.drop(t.rhs.ID(), t.lhs.ID()) case *evalFold: tracker.stack.drop(t.iterRange.ID()) case Qualifier: @@ -133,7 +125,6 @@ func PresenceTestHasCost(hasCost bool) CostTrackerOption { func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) { tracker := &CostTracker{ Estimator: estimator, - overloadTrackers: map[string]FunctionTracker{}, presenceTestHasCost: true, } for _, opt := range opts { @@ -145,24 +136,9 @@ func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (* return tracker, nil } -// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation. -// -// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or -// optional cost tracking changes. -func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption { - return func(tracker *CostTracker) error { - tracker.overloadTrackers[overloadID] = fnTracker - return nil - } -} - -// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result. -type FunctionTracker func(args []ref.Val, result ref.Val) *uint64 - // CostTracker represents the information needed for tracking runtime cost. 
type CostTracker struct { Estimator ActualCostEstimator - overloadTrackers map[string]FunctionTracker Limit *uint64 presenceTestHasCost bool @@ -175,19 +151,10 @@ func (c *CostTracker) ActualCost() uint64 { return c.cost } -func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 { +func (c *CostTracker) costCall(call InterpretableCall, argValues []ref.Val, result ref.Val) uint64 { var cost uint64 - if len(c.overloadTrackers) != 0 { - if tracker, found := c.overloadTrackers[call.OverloadID()]; found { - callCost := tracker(args, result) - if callCost != nil { - cost += *callCost - return cost - } - } - } if c.Estimator != nil { - callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result) + callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), argValues, result) if callCost != nil { cost += *callCost return cost @@ -198,11 +165,11 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re switch call.OverloadID() { // O(n) functions case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString: - cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) + cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor)) case overloads.InList: // If a list is composed entirely of constant values this is O(1), but we don't account for that here. // We just assume all list containment checks are O(n). - cost += c.actualSize(args[1]) + cost += c.actualSize(argValues[1]) // O(min(m, n)) functions case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, @@ -210,8 +177,8 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.), // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost // of 1. - lhsSize := c.actualSize(args[0]) - rhsSize := c.actualSize(args[1]) + lhsSize := c.actualSize(argValues[0]) + rhsSize := c.actualSize(argValues[1]) minSize := lhsSize if rhsSize < minSize { minSize = rhsSize @@ -220,23 +187,23 @@ func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result re // O(m+n) functions case overloads.AddString, overloads.AddBytes: // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over. - cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor)) + cost += uint64(math.Ceil(float64(c.actualSize(argValues[0])+c.actualSize(argValues[1])) * common.StringTraversalCostFactor)) // O(nm) functions case overloads.MatchesString: // https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL // Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0 // in case where string is empty but regex is still expensive. 
- strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor)) + strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(argValues[0]))) * common.StringTraversalCostFactor)) // We don't know how many expressions are in the regex, just the string length (a huge // improvement here would be to somehow get a count the number of expressions in the regex or // how many states are in the regex state machine and use that to measure regex cost). // For now, we're making a guess that each expression in a regex is typically at least 4 chars // in length. - regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor)) + regexCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.RegexStringLengthCostFactor)) cost += strCost * regexCost case overloads.ContainsString: - strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) - substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor)) + strCost := uint64(math.Ceil(float64(c.actualSize(argValues[0])) * common.StringTraversalCostFactor)) + substrCost := uint64(math.Ceil(float64(c.actualSize(argValues[1])) * common.StringTraversalCostFactor)) cost += strCost * substrCost default: diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go index 93ae7a3ad8c..ce49bb87f8e 100644 --- a/vendor/github.com/google/cel-go/parser/errors.go +++ b/vendor/github.com/google/cel-go/parser/errors.go @@ -22,22 +22,9 @@ import ( // parseErrors is a specialization of Errors. type parseErrors struct { - errs *common.Errors -} - -// errorCount indicates the number of errors reported. -func (e *parseErrors) errorCount() int { - return len(e.errs.GetErrors()) -} - -func (e *parseErrors) internalError(message string) { - e.errs.ReportErrorAtID(0, common.NoLocation, message) + *common.Errors } func (e *parseErrors) syntaxError(l common.Location, message string) { - e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message)) -} - -func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) { - e.errs.ReportErrorAtID(id, l, message, args...) + e.ReportError(l, fmt.Sprintf("Syntax error: %s", message)) } diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go index a5f29e3d7ae..8f8f478ed12 100644 --- a/vendor/github.com/google/cel-go/parser/helper.go +++ b/vendor/github.com/google/cel-go/parser/helper.go @@ -193,15 +193,15 @@ func (p *parserHelper) newExpr(ctx any) *exprpb.Expr { func (p *parserHelper) id(ctx any) int64 { var location common.Location - switch c := ctx.(type) { + switch ctx.(type) { case antlr.ParserRuleContext: - token := c.GetStart() + token := (ctx.(antlr.ParserRuleContext)).GetStart() location = p.source.NewLocation(token.GetLine(), token.GetColumn()) case antlr.Token: - token := c + token := ctx.(antlr.Token) location = p.source.NewLocation(token.GetLine(), token.GetColumn()) case common.Location: - location = c + location = ctx.(common.Location) default: // This should only happen if the ctx is nil return -1 @@ -297,83 +297,67 @@ func (p *parserHelper) addMacroCall(exprID int64, function string, target *exprp } } -// logicManager compacts logical trees into a more efficient structure which is semantically -// equivalent with how the logic graph is constructed by the ANTLR parser. 
+// balancer performs tree balancing on operators whose arguments are of equal precedence. // -// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, || +// The purpose of the balancer is to ensure a compact serialization format for the logical &&, || // operators which have a tendency to create long DAGs which are skewed in one direction. Since the // operators are commutative re-ordering the terms *must not* affect the evaluation result. // -// The logic manager will either render the terms to N-chained && / || operators as a single logical -// call with N-terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat -// controversial choice as it alters the traditional order of execution assumptions present in most -// expressions. -type logicManager struct { - helper *parserHelper - function string - terms []*exprpb.Expr - ops []int64 - variadicASTs bool -} - -// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term. -func newVariadicLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { - return &logicManager{ - helper: h, - function: function, - terms: []*exprpb.Expr{term}, - ops: []int64{}, - variadicASTs: true, - } -} - -// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term. -func newBalancingLogicManager(h *parserHelper, function string, term *exprpb.Expr) *logicManager { - return &logicManager{ - helper: h, - function: function, - terms: []*exprpb.Expr{term}, - ops: []int64{}, - variadicASTs: false, +// Re-balancing the terms is a safe, if somewhat controversial choice. A better solution would be +// to make these functions variadic and update both the checker and interpreter to understand this; +// however, this is a more complex change. +// +// TODO: Consider replacing tree-balancing with variadic logical &&, || within the parser, checker, +// and interpreter. +type balancer struct { + helper *parserHelper + function string + terms []*exprpb.Expr + ops []int64 +} + +// newBalancer creates a balancer instance bound to a specific function and its first term. +func newBalancer(h *parserHelper, function string, term *exprpb.Expr) *balancer { + return &balancer{ + helper: h, + function: function, + terms: []*exprpb.Expr{term}, + ops: []int64{}, } } // addTerm adds an operation identifier and term to the set of terms to be balanced. -func (l *logicManager) addTerm(op int64, term *exprpb.Expr) { - l.terms = append(l.terms, term) - l.ops = append(l.ops, op) +func (b *balancer) addTerm(op int64, term *exprpb.Expr) { + b.terms = append(b.terms, term) + b.ops = append(b.ops, op) } -// toExpr renders the logic graph into an Expr value, either balancing a tree of logical -// operations or creating a variadic representation of the logical operator. -func (l *logicManager) toExpr() *exprpb.Expr { - if len(l.terms) == 1 { - return l.terms[0] - } - if l.variadicASTs { - return l.helper.newGlobalCall(l.ops[0], l.function, l.terms...) +// balance creates a balanced tree from the sub-terms and returns the final Expr value. +func (b *balancer) balance() *exprpb.Expr { + if len(b.terms) == 1 { + return b.terms[0] } - return l.balancedTree(0, len(l.ops)-1) + return b.balancedTree(0, len(b.ops)-1) } // balancedTree recursively balances the terms provided to a commutative operator. 
-func (l *logicManager) balancedTree(lo, hi int) *exprpb.Expr { +func (b *balancer) balancedTree(lo, hi int) *exprpb.Expr { mid := (lo + hi + 1) / 2 var left *exprpb.Expr if mid == lo { - left = l.terms[mid] + left = b.terms[mid] } else { - left = l.balancedTree(lo, mid-1) + left = b.balancedTree(lo, mid-1) } var right *exprpb.Expr if mid == hi { - right = l.terms[mid+1] + right = b.terms[mid+1] } else { - right = l.balancedTree(mid+1, hi) + right = b.balancedTree(mid+1, hi) } - return l.helper.newGlobalCall(l.ops[mid], l.function, left, right) + return b.helper.newGlobalCall(b.ops[mid], b.function, left, right) } type exprHelper struct { @@ -386,7 +370,7 @@ func (e *exprHelper) nextMacroID() int64 { } // Copy implements the ExprHelper interface method by producing a copy of the input Expr value -// with a fresh set of numeric identifiers the Expr and all its descendants. +// with a fresh set of numeric identifiers the Expr and all its descendents. func (e *exprHelper) Copy(expr *exprpb.Expr) *exprpb.Expr { copy := e.parserHelper.newExpr(e.parserHelper.getLocation(expr.GetId())) switch expr.GetExprKind().(type) { @@ -574,22 +558,11 @@ func (e *exprHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr { // OffsetLocation implements the ExprHelper interface method. func (e *exprHelper) OffsetLocation(exprID int64) common.Location { - offset, found := e.parserHelper.positions[exprID] - if !found { - return common.NoLocation - } - location, found := e.parserHelper.source.OffsetLocation(offset) - if !found { - return common.NoLocation - } + offset := e.parserHelper.positions[exprID] + location, _ := e.parserHelper.source.OffsetLocation(offset) return location } -// NewError associates an error message with a given expression id, populating the source offset location of the error if possible. -func (e *exprHelper) NewError(exprID int64, message string) *common.Error { - return common.NewError(exprID, message, e.OffsetLocation(exprID)) -} - var ( // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations. exprHelperPool = &sync.Pool{ diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go index 6066e8ef4f8..80e5c66c656 100644 --- a/vendor/github.com/google/cel-go/parser/macro.go +++ b/vendor/github.com/google/cel-go/parser/macro.go @@ -232,9 +232,6 @@ type ExprHelper interface { // OffsetLocation returns the Location of the expression identifier. OffsetLocation(exprID int64) common.Location - - // NewError associates an error message with a given expression id. 
- NewError(exprID int64, message string) *common.Error } var ( @@ -327,7 +324,7 @@ func MakeExistsOne(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*ex func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, &common.Error{Message: "argument is not an identifier"} } var fn *exprpb.Expr @@ -358,7 +355,7 @@ func MakeMap(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E func MakeFilter(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument is not an identifier") + return nil, &common.Error{Message: "argument is not an identifier"} } filter := args[1] @@ -375,13 +372,17 @@ func MakeHas(eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.E if s, ok := args[0].ExprKind.(*exprpb.Expr_SelectExpr); ok { return eh.PresenceTest(s.SelectExpr.GetOperand(), s.SelectExpr.GetField()), nil } - return nil, eh.NewError(args[0].GetId(), "invalid argument to has() macro") + return nil, &common.Error{Message: "invalid argument to has() macro"} } func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *common.Error) { v, found := extractIdent(args[0]) if !found { - return nil, eh.NewError(args[0].GetId(), "argument must be a simple name") + location := eh.OffsetLocation(args[0].GetId()) + return nil, &common.Error{ + Message: "argument must be a simple name", + Location: location, + } } var init *exprpb.Expr @@ -410,7 +411,7 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target *exprpb.Expr, arg eh.GlobalCall(operators.Add, eh.AccuIdent(), oneExpr), eh.AccuIdent()) result = eh.GlobalCall(operators.Equals, eh.AccuIdent(), oneExpr) default: - return nil, eh.NewError(args[0].GetId(), fmt.Sprintf("unrecognized quantifier '%v'", kind)) + return nil, &common.Error{Message: fmt.Sprintf("unrecognized quantifier '%v'", kind)} } return eh.Fold(v, target, AccumulatorName, init, condition, step, result), nil } diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go index 61fc3adec4c..674c697c5cd 100644 --- a/vendor/github.com/google/cel-go/parser/options.go +++ b/vendor/github.com/google/cel-go/parser/options.go @@ -25,7 +25,6 @@ type options struct { macros map[string]Macro populateMacroCalls bool enableOptionalSyntax bool - enableVariadicOperatorASTs bool } // Option configures the behavior of the parser. @@ -126,15 +125,3 @@ func EnableOptionalSyntax(optionalSyntax bool) Option { return nil } } - -// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative -// operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])` -// -// The benefit of enabling variadic operators ASTs is a more compact representation deeply nested -// logic graphs. 
-func EnableVariadicOperatorASTs(varArgASTs bool) Option { - return func(opts *options) error { - opts.enableVariadicOperatorASTs = varArgASTs - return nil - } -} diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go index 109326a9399..e6f70f9060e 100644 --- a/vendor/github.com/google/cel-go/parser/parser.go +++ b/vendor/github.com/google/cel-go/parser/parser.go @@ -89,9 +89,8 @@ func mustNewParser(opts ...Option) *Parser { // Parse parses the expression represented by source and returns the result. func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors) { - errs := common.NewErrors(source) impl := parser{ - errors: &parseErrors{errs}, + errors: &parseErrors{common.NewErrors(source)}, helper: newParserHelper(source), macros: p.macros, maxRecursionDepth: p.maxRecursionDepth, @@ -100,7 +99,6 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit, populateMacroCalls: p.populateMacroCalls, enableOptionalSyntax: p.enableOptionalSyntax, - enableVariadicOperatorASTs: p.enableVariadicOperatorASTs, } buf, ok := source.(runes.Buffer) if !ok { @@ -117,7 +115,7 @@ func (p *Parser) Parse(source common.Source) (*exprpb.ParsedExpr, *common.Errors return &exprpb.ParsedExpr{ Expr: e, SourceInfo: impl.helper.getSourceInfo(), - }, errs + }, impl.errors.Errors } // reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid @@ -297,7 +295,6 @@ type parser struct { errorRecoveryLookaheadTokenLimit int populateMacroCalls bool enableOptionalSyntax bool - enableVariadicOperatorASTs bool } var ( @@ -360,9 +357,9 @@ func (p *parser) parse(expr runes.Buffer, desc string) *exprpb.Expr { if val := recover(); val != nil { switch err := val.(type) { case *lookaheadLimitError: - p.errors.internalError(err.Error()) + p.errors.ReportError(common.NoLocation, err.Error()) case *recursionError: - p.errors.internalError(err.Error()) + p.errors.ReportError(common.NoLocation, err.Error()) case *tooManyErrors: // do nothing case *recoveryLimitError: @@ -452,7 +449,7 @@ func (p *parser) Visit(tree antlr.ParseTree) any { // Report at least one error if the parser reaches an unknown parse element. // Typically, this happens if the parser has already encountered a syntax error elsewhere. - if p.errors.errorCount() == 0 { + if len(p.errors.GetErrors()) == 0 { txt := "<>" if t != nil { txt = fmt.Sprintf("<<%T>>", t) @@ -483,7 +480,7 @@ func (p *parser) VisitExpr(ctx *gen.ExprContext) any { // Visit a parse tree produced by CELParser#conditionalOr. func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { result := p.Visit(ctx.GetE()).(*exprpb.Expr) - l := p.newLogicManager(operators.LogicalOr, result) + b := newBalancer(p.helper, operators.LogicalOr, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { @@ -491,15 +488,15 @@ func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { } next := p.Visit(rest[i]).(*exprpb.Expr) opID := p.helper.id(op) - l.addTerm(opID, next) + b.addTerm(opID, next) } - return l.toExpr() + return b.balance() } // Visit a parse tree produced by CELParser#conditionalAnd. 
func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { result := p.Visit(ctx.GetE()).(*exprpb.Expr) - l := p.newLogicManager(operators.LogicalAnd, result) + b := newBalancer(p.helper, operators.LogicalAnd, result) rest := ctx.GetE1() for i, op := range ctx.GetOps() { if i >= len(rest) { @@ -507,9 +504,9 @@ func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { } next := p.Visit(rest[i]).(*exprpb.Expr) opID := p.helper.id(op) - l.addTerm(opID, next) + b.addTerm(opID, next) } - return l.toExpr() + return b.balance() } // Visit a parse tree produced by CELParser#relation. @@ -870,24 +867,18 @@ func (p *parser) unquote(ctx any, value string, isBytes bool) string { return text } -func (p *parser) newLogicManager(function string, term *exprpb.Expr) *logicManager { - if p.enableVariadicOperatorASTs { - return newVariadicLogicManager(p.helper, function, term) - } - return newBalancingLogicManager(p.helper, function, term) -} - func (p *parser) reportError(ctx any, format string, args ...any) *exprpb.Expr { var location common.Location - err := p.helper.newExpr(ctx) - switch c := ctx.(type) { + switch ctx.(type) { case common.Location: - location = c + location = ctx.(common.Location) case antlr.Token, antlr.ParserRuleContext: + err := p.helper.newExpr(ctx) location = p.helper.getLocation(err.GetId()) } + err := p.helper.newExpr(ctx) // Provide arguments to the report error. - p.errors.reportErrorAtID(err.GetId(), location, format, args...) + p.errors.ReportError(location, format, args...) return err } diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig deleted file mode 100644 index 2940ec92ac2..00000000000 --- a/vendor/github.com/gorilla/websocket/.editorconfig +++ /dev/null @@ -1,20 +0,0 @@ -; https://editorconfig.org/ - -root = true - -[*] -insert_final_newline = true -charset = utf-8 -trim_trailing_whitespace = true -indent_style = space -indent_size = 2 - -[{Makefile,go.mod,go.sum,*.go,.gitmodules}] -indent_style = tab -indent_size = 4 - -[*.md] -indent_size = 4 -trim_trailing_whitespace = false - -eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore deleted file mode 100644 index 84039fec687..00000000000 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ /dev/null @@ -1 +0,0 @@ -coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml deleted file mode 100644 index 34882139e1f..00000000000 --- a/vendor/github.com/gorilla/websocket/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -run: - skip-dirs: - - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE deleted file mode 100644 index bb9d80bc9b6..00000000000 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2023 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile deleted file mode 100644 index 603a63f50a3..00000000000 --- a/vendor/github.com/gorilla/websocket/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') -GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest - -GO_SEC=$(shell which gosec 2> /dev/null || echo '') -GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest - -GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') -GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest - -.PHONY: golangci-lint -golangci-lint: - $(if $(GO_LINT), ,go install $(GO_LINT_URI)) - @echo "##### Running golangci-lint" - golangci-lint run -v - -.PHONY: gosec -gosec: - $(if $(GO_SEC), ,go install $(GO_SEC_URI)) - @echo "##### Running gosec" - gosec -exclude-dir examples ./... - -.PHONY: govulncheck -govulncheck: - $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) - @echo "##### Running govulncheck" - govulncheck ./... - -.PHONY: verify -verify: golangci-lint gosec govulncheck - -.PHONY: test -test: - @echo "##### Running tests" - go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md deleted file mode 100644 index 1fd5e9c4e79..00000000000 --- a/vendor/github.com/gorilla/websocket/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# gorilla/websocket - -![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) -[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) -[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) - -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
- -![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) - - -### Documentation - -* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) -* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) -* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) -* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) -* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) -* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) - -### Status - -The Gorilla WebSocket package provides a complete and tested implementation of -the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The -package API is stable. - -### Installation - - go get github.com/gorilla/websocket - -### Protocol Compliance - -The Gorilla WebSocket package passes the server tests in the [Autobahn Test -Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn -subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go deleted file mode 100644 index 815b0ca5c8f..00000000000 --- a/vendor/github.com/gorilla/websocket/client.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - - "net" - "net/http" - "net/http/httptrace" - "net/url" - "strings" - "time" - - "golang.org/x/net/proxy" -) - -// ErrBadHandshake is returned when the server response to opening handshake is -// invalid. -var ErrBadHandshake = errors.New("websocket: bad handshake") - -var errInvalidCompression = errors.New("websocket: invalid compression negotiation") - -// NewClient creates a new client connection using the given net connection. -// The URL u specifies the host and request URI. Use requestHeader to specify -// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies -// (Cookie). Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etc. -// -// Deprecated: Use Dialer instead. -func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - d := Dialer{ - ReadBufferSize: readBufSize, - WriteBufferSize: writeBufSize, - NetDial: func(net, addr string) (net.Conn, error) { - return netConn, nil - }, - } - return d.Dial(u.String(), requestHeader) -} - -// A Dialer contains options for connecting to WebSocket server. -// -// It is safe to call Dialer's methods concurrently. -type Dialer struct { - // NetDial specifies the dial function for creating TCP connections. If - // NetDial is nil, net.Dial is used. - NetDial func(network, addr string) (net.Conn, error) - - // NetDialContext specifies the dial function for creating TCP connections. If - // NetDialContext is nil, NetDial is used. 
- NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If - // NetDialTLSContext is nil, NetDialContext is used. - // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and - // TLSClientConfig is ignored. - NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*http.Request) (*url.URL, error) - - // TLSClientConfig specifies the TLS configuration to use with tls.Client. - // If nil, the default configuration is used. - // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake - // is done there and TLSClientConfig is ignored. - TLSClientConfig *tls.Config - - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then a useful default size is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the client's requested subprotocols. - Subprotocols []string - - // EnableCompression specifies if the client should attempt to negotiate - // per message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool - - // Jar specifies the cookie jar. - // If Jar is nil, cookies are not sent in requests and ignored - // in responses. - Jar http.CookieJar -} - -// Dial creates a new client connection by calling DialContext with a background context. -func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - return d.DialContext(context.Background(), urlStr, requestHeader) -} - -var errMalformedURL = errors.New("malformed ws or wss URL") - -func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { - hostPort = u.Host - hostNoPort = u.Host - if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { - hostNoPort = hostNoPort[:i] - } else { - switch u.Scheme { - case "wss": - hostPort += ":443" - case "https": - hostPort += ":443" - default: - hostPort += ":80" - } - } - return hostPort, hostNoPort -} - -// DefaultDialer is a dialer with all fields set to the default values. -var DefaultDialer = &Dialer{ - Proxy: http.ProxyFromEnvironment, - HandshakeTimeout: 45 * time.Second, -} - -// nilDialer is dialer to use when receiver is nil. -var nilDialer = *DefaultDialer - -// DialContext creates a new client connection. Use requestHeader to specify the -// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). 
-// Use the response.Header to get the selected subprotocol -// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). -// -// The context will be used in the request and in the Dialer. -// -// If the WebSocket handshake fails, ErrBadHandshake is returned along with a -// non-nil *http.Response so that callers can handle redirects, authentication, -// etcetera. The response body may not contain the entire response and does not -// need to be closed by the application. -func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { - if d == nil { - d = &nilDialer - } - - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - - u, err := url.Parse(urlStr) - if err != nil { - return nil, nil, err - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - default: - return nil, nil, errMalformedURL - } - - if u.User != nil { - // User name and password are not allowed in websocket URIs. - return nil, nil, errMalformedURL - } - - req := &http.Request{ - Method: http.MethodGet, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(http.Header), - Host: u.Host, - } - req = req.WithContext(ctx) - - // Set the cookies present in the cookie jar of the dialer - if d.Jar != nil { - for _, cookie := range d.Jar.Cookies(u) { - req.AddCookie(cookie) - } - } - - // Set the request headers using the capitalization for names and values in - // RFC examples. Although the capitalization shouldn't matter, there are - // servers that depend on it. The Header.Set method is not used because the - // method canonicalizes the header names. - req.Header["Upgrade"] = []string{"websocket"} - req.Header["Connection"] = []string{"Upgrade"} - req.Header["Sec-WebSocket-Key"] = []string{challengeKey} - req.Header["Sec-WebSocket-Version"] = []string{"13"} - if len(d.Subprotocols) > 0 { - req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} - } - for k, vs := range requestHeader { - switch { - case k == "Host": - if len(vs) > 0 { - req.Host = vs[0] - } - case k == "Upgrade" || - k == "Connection" || - k == "Sec-Websocket-Key" || - k == "Sec-Websocket-Version" || - //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution - k == "Sec-Websocket-Extensions" || - (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): - return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) - case k == "Sec-Websocket-Protocol": - req.Header["Sec-WebSocket-Protocol"] = vs - default: - req.Header[k] = vs - } - } - - if d.EnableCompression { - req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} - } - - if d.HandshakeTimeout != 0 { - var cancel func() - ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) - defer cancel() - } - - // Get network dial function. 
- var netDial func(network, add string) (net.Conn, error) - - switch u.Scheme { - case "http": - if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - case "https": - if d.NetDialTLSContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialTLSContext(ctx, network, addr) - } - } else if d.NetDialContext != nil { - netDial = func(network, addr string) (net.Conn, error) { - return d.NetDialContext(ctx, network, addr) - } - } else if d.NetDial != nil { - netDial = d.NetDial - } - default: - return nil, nil, errMalformedURL - } - - if netDial == nil { - netDialer := &net.Dialer{} - netDial = func(network, addr string) (net.Conn, error) { - return netDialer.DialContext(ctx, network, addr) - } - } - - // If needed, wrap the dial function to set the connection deadline. - if deadline, ok := ctx.Deadline(); ok { - forwardDial := netDial - netDial = func(network, addr string) (net.Conn, error) { - c, err := forwardDial(network, addr) - if err != nil { - return nil, err - } - err = c.SetDeadline(deadline) - if err != nil { - if err := c.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - return c, nil - } - } - - // If needed, wrap the dial function to connect through a proxy. - if d.Proxy != nil { - proxyURL, err := d.Proxy(req) - if err != nil { - return nil, nil, err - } - if proxyURL != nil { - dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) - if err != nil { - return nil, nil, err - } - netDial = dialer.Dial - } - } - - hostPort, hostNoPort := hostPortNoPort(u) - trace := httptrace.ContextClientTrace(ctx) - if trace != nil && trace.GetConn != nil { - trace.GetConn(hostPort) - } - - netConn, err := netDial("tcp", hostPort) - if err != nil { - return nil, nil, err - } - if trace != nil && trace.GotConn != nil { - trace.GotConn(httptrace.GotConnInfo{ - Conn: netConn, - }) - } - - defer func() { - if netConn != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - } - }() - - if u.Scheme == "https" && d.NetDialTLSContext == nil { - // If NetDialTLSContext is set, assume that the TLS handshake has already been done - - cfg := cloneTLSConfig(d.TLSClientConfig) - if cfg.ServerName == "" { - cfg.ServerName = hostNoPort - } - tlsConn := tls.Client(netConn, cfg) - netConn = tlsConn - - if trace != nil && trace.TLSHandshakeStart != nil { - trace.TLSHandshakeStart() - } - err := doHandshake(ctx, tlsConn, cfg) - if trace != nil && trace.TLSHandshakeDone != nil { - trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) - } - - if err != nil { - return nil, nil, err - } - } - - conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) - - if err := req.Write(netConn); err != nil { - return nil, nil, err - } - - if trace != nil && trace.GotFirstResponseByte != nil { - if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { - trace.GotFirstResponseByte() - } - } - - resp, err := http.ReadResponse(conn.br, req) - if err != nil { - if d.TLSClientConfig != nil { - for _, proto := range d.TLSClientConfig.NextProtos { - if proto != "http/1.1" { - return nil, nil, fmt.Errorf( - "websocket: protocol %q was given but is not supported;"+ - "sharing tls.Config with net/http Transport can cause this error: %w", - proto, err, - ) - } - } - } - 
return nil, nil, err - } - - if d.Jar != nil { - if rc := resp.Cookies(); len(rc) > 0 { - d.Jar.SetCookies(u, rc) - } - } - - if resp.StatusCode != 101 || - !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || - !tokenListContainsValue(resp.Header, "Connection", "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) - return nil, resp, ErrBadHandshake - } - - for _, ext := range parseExtensions(resp.Header) { - if ext[""] != "permessage-deflate" { - continue - } - _, snct := ext["server_no_context_takeover"] - _, cnct := ext["client_no_context_takeover"] - if !snct || !cnct { - return nil, resp, errInvalidCompression - } - conn.newCompressionWriter = compressNoContextTakeover - conn.newDecompressionReader = decompressNoContextTakeover - break - } - - resp.Body = io.NopCloser(bytes.NewReader([]byte{})) - conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - - if err := netConn.SetDeadline(time.Time{}); err != nil { - return nil, nil, err - } - netConn = nil // to avoid close in defer. - return conn, resp, nil -} - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{MinVersion: tls.VersionTLS12} - } - return cfg.Clone() -} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go deleted file mode 100644 index 9fed0ef521c..00000000000 --- a/vendor/github.com/gorilla/websocket/compression.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "compress/flate" - "errors" - "io" - "log" - "strings" - "sync" -) - -const ( - minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 - maxCompressionLevel = flate.BestCompression - defaultCompressionLevel = 1 -) - -var ( - flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool - flateReaderPool = sync.Pool{New: func() interface{} { - return flate.NewReader(nil) - }} -) - -func decompressNoContextTakeover(r io.Reader) io.ReadCloser { - const tail = - // Add four bytes as specified in RFC - "\x00\x00\xff\xff" + - // Add final block to squelch unexpected EOF error from flate reader. - "\x01\x00\x00\xff\xff" - - fr, _ := flateReaderPool.Get().(io.ReadCloser) - if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { - panic(err) - } - return &flateReadWrapper{fr} -} - -func isValidCompressionLevel(level int) bool { - return minCompressionLevel <= level && level <= maxCompressionLevel -} - -func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { - p := &flateWriterPools[level-minCompressionLevel] - tw := &truncWriter{w: w} - fw, _ := p.Get().(*flate.Writer) - if fw == nil { - fw, _ = flate.NewWriter(tw, level) - } else { - fw.Reset(tw) - } - return &flateWriteWrapper{fw: fw, tw: tw, p: p} -} - -// truncWriter is an io.Writer that writes all but the last four bytes of the -// stream to another io.Writer. 
-type truncWriter struct { - w io.WriteCloser - n int - p [4]byte -} - -func (w *truncWriter) Write(p []byte) (int, error) { - n := 0 - - // fill buffer first for simplicity. - if w.n < len(w.p) { - n = copy(w.p[w.n:], p) - p = p[n:] - w.n += n - if len(p) == 0 { - return n, nil - } - } - - m := len(p) - if m > len(w.p) { - m = len(w.p) - } - - if nn, err := w.w.Write(w.p[:m]); err != nil { - return n + nn, err - } - - copy(w.p[:], w.p[m:]) - copy(w.p[len(w.p)-m:], p[len(p)-m:]) - nn, err := w.w.Write(p[:len(p)-m]) - return n + nn, err -} - -type flateWriteWrapper struct { - fw *flate.Writer - tw *truncWriter - p *sync.Pool -} - -func (w *flateWriteWrapper) Write(p []byte) (int, error) { - if w.fw == nil { - return 0, errWriteClosed - } - return w.fw.Write(p) -} - -func (w *flateWriteWrapper) Close() error { - if w.fw == nil { - return errWriteClosed - } - err1 := w.fw.Flush() - w.p.Put(w.fw) - w.fw = nil - if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { - return errors.New("websocket: internal error, unexpected bytes at end of flate stream") - } - err2 := w.tw.w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -type flateReadWrapper struct { - fr io.ReadCloser -} - -func (r *flateReadWrapper) Read(p []byte) (int, error) { - if r.fr == nil { - return 0, io.ErrClosedPipe - } - n, err := r.fr.Read(p) - if err == io.EOF { - // Preemptively place the reader back in the pool. This helps with - // scenarios where the application does not call NextReader() soon after - // this final read. - if err := r.Close(); err != nil { - log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) - } - } - return n, err -} - -func (r *flateReadWrapper) Close() error { - if r.fr == nil { - return io.ErrClosedPipe - } - err := r.fr.Close() - flateReaderPool.Put(r.fr) - r.fr = nil - return err -} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go deleted file mode 100644 index 221e6cf7988..00000000000 --- a/vendor/github.com/gorilla/websocket/conn.go +++ /dev/null @@ -1,1267 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "crypto/rand" - "encoding/binary" - "errors" - "io" - "log" - "net" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - // Frame header byte 0 bits from Section 5.2 of RFC 6455 - finalBit = 1 << 7 - rsv1Bit = 1 << 6 - rsv2Bit = 1 << 5 - rsv3Bit = 1 << 4 - - // Frame header byte 1 bits from Section 5.2 of RFC 6455 - maskBit = 1 << 7 - - maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask - maxControlFramePayloadSize = 125 - - writeWait = time.Second - - defaultReadBufferSize = 4096 - defaultWriteBufferSize = 4096 - - continuationFrame = 0 - noFrame = -1 -) - -// Close codes defined in RFC 6455, section 11.7. -const ( - CloseNormalClosure = 1000 - CloseGoingAway = 1001 - CloseProtocolError = 1002 - CloseUnsupportedData = 1003 - CloseNoStatusReceived = 1005 - CloseAbnormalClosure = 1006 - CloseInvalidFramePayloadData = 1007 - ClosePolicyViolation = 1008 - CloseMessageTooBig = 1009 - CloseMandatoryExtension = 1010 - CloseInternalServerErr = 1011 - CloseServiceRestart = 1012 - CloseTryAgainLater = 1013 - CloseTLSHandshake = 1015 -) - -// The message types are defined in RFC 6455, section 11.8. -const ( - // TextMessage denotes a text data message. 
The text message payload is - // interpreted as UTF-8 encoded text data. - TextMessage = 1 - - // BinaryMessage denotes a binary data message. - BinaryMessage = 2 - - // CloseMessage denotes a close control message. The optional message - // payload contains a numeric code and text. Use the FormatCloseMessage - // function to format a close message payload. - CloseMessage = 8 - - // PingMessage denotes a ping control message. The optional message payload - // is UTF-8 encoded text. - PingMessage = 9 - - // PongMessage denotes a pong control message. The optional message payload - // is UTF-8 encoded text. - PongMessage = 10 -) - -// ErrCloseSent is returned when the application writes a message to the -// connection after sending a close message. -var ErrCloseSent = errors.New("websocket: close sent") - -// ErrReadLimit is returned when reading a message that is larger than the -// read limit set for the connection. -var ErrReadLimit = errors.New("websocket: read limit exceeded") - -// netError satisfies the net Error interface. -type netError struct { - msg string - temporary bool - timeout bool -} - -func (e *netError) Error() string { return e.msg } -func (e *netError) Temporary() bool { return e.temporary } -func (e *netError) Timeout() bool { return e.timeout } - -// CloseError represents a close message. -type CloseError struct { - // Code is defined in RFC 6455, section 11.7. - Code int - - // Text is the optional text payload. - Text string -} - -func (e *CloseError) Error() string { - s := []byte("websocket: close ") - s = strconv.AppendInt(s, int64(e.Code), 10) - switch e.Code { - case CloseNormalClosure: - s = append(s, " (normal)"...) - case CloseGoingAway: - s = append(s, " (going away)"...) - case CloseProtocolError: - s = append(s, " (protocol error)"...) - case CloseUnsupportedData: - s = append(s, " (unsupported data)"...) - case CloseNoStatusReceived: - s = append(s, " (no status)"...) - case CloseAbnormalClosure: - s = append(s, " (abnormal closure)"...) - case CloseInvalidFramePayloadData: - s = append(s, " (invalid payload data)"...) - case ClosePolicyViolation: - s = append(s, " (policy violation)"...) - case CloseMessageTooBig: - s = append(s, " (message too big)"...) - case CloseMandatoryExtension: - s = append(s, " (mandatory extension missing)"...) - case CloseInternalServerErr: - s = append(s, " (internal server error)"...) - case CloseTLSHandshake: - s = append(s, " (TLS handshake error)"...) - } - if e.Text != "" { - s = append(s, ": "...) - s = append(s, e.Text...) - } - return string(s) -} - -// IsCloseError returns boolean indicating whether the error is a *CloseError -// with one of the specified codes. -func IsCloseError(err error, codes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// IsUnexpectedCloseError returns boolean indicating whether the error is a -// *CloseError with a code not in the list of expected codes. 
-func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { - if e, ok := err.(*CloseError); ok { - for _, code := range expectedCodes { - if e.Code == code { - return false - } - } - return true - } - return false -} - -var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} - errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} - errBadWriteOpCode = errors.New("websocket: bad write message type") - errWriteClosed = errors.New("websocket: write closed") - errInvalidControlFrame = errors.New("websocket: invalid control frame") -) - -// maskRand is an io.Reader for generating mask bytes. The reader is initialized -// to crypto/rand Reader. Tests swap the reader to a math/rand reader for -// reproducible results. -var maskRand = rand.Reader - -// newMaskKey returns a new 32 bit value for masking client frames. -func newMaskKey() [4]byte { - var k [4]byte - _, _ = io.ReadFull(maskRand, k[:]) - return k -} - -func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok { - err = &netError{msg: e.Error(), timeout: e.Timeout()} - } - return err -} - -func isControl(frameType int) bool { - return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage -} - -func isData(frameType int) bool { - return frameType == TextMessage || frameType == BinaryMessage -} - -var validReceivedCloseCodes = map[int]bool{ - // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number - - CloseNormalClosure: true, - CloseGoingAway: true, - CloseProtocolError: true, - CloseUnsupportedData: true, - CloseNoStatusReceived: false, - CloseAbnormalClosure: false, - CloseInvalidFramePayloadData: true, - ClosePolicyViolation: true, - CloseMessageTooBig: true, - CloseMandatoryExtension: true, - CloseInternalServerErr: true, - CloseServiceRestart: true, - CloseTryAgainLater: true, - CloseTLSHandshake: false, -} - -func isValidReceivedCloseCode(code int) bool { - return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) -} - -// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this -// interface. The type of the value stored in a pool is not specified. -type BufferPool interface { - // Get gets a value from the pool or returns nil if the pool is empty. - Get() interface{} - // Put adds a value to the pool. - Put(interface{}) -} - -// writePoolData is the type added to the write buffer pool. This wrapper is -// used to prevent applications from peeking at and depending on the values -// added to the pool. -type writePoolData struct{ buf []byte } - -// The Conn type represents a WebSocket connection. -type Conn struct { - conn net.Conn - isServer bool - subprotocol string - - // Write fields - mu chan struct{} // used as mutex to protect write to conn - writeBuf []byte // frame is constructed in this buffer. - writePool BufferPool - writeBufSize int - writeDeadline time.Time - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection - - writeErrMu sync.Mutex - writeErr error - - enableWriteCompression bool - compressionLevel int - newCompressionWriter func(io.WriteCloser, int) io.WriteCloser - - // Read fields - reader io.ReadCloser // the current reader returned to the application - readErr error - br *bufio.Reader - // bytes remaining in current frame. 
- // set setReadRemaining to safely update this value and prevent overflow - readRemaining int64 - readFinal bool // true the current message has more frames. - readLength int64 // Message size. - readLimit int64 // Maximum message size. - readMaskPos int - readMaskKey [4]byte - handlePong func(string) error - handlePing func(string) error - handleClose func(int, string) error - readErrCount int - messageReader *messageReader // the current low-level reader - - readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.ReadCloser -} - -func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { - - if br == nil { - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize - } else if readBufferSize < maxControlFramePayloadSize { - // must be large enough for control frame - readBufferSize = maxControlFramePayloadSize - } - br = bufio.NewReaderSize(conn, readBufferSize) - } - - if writeBufferSize <= 0 { - writeBufferSize = defaultWriteBufferSize - } - writeBufferSize += maxFrameHeaderSize - - if writeBuf == nil && writeBufferPool == nil { - writeBuf = make([]byte, writeBufferSize) - } - - mu := make(chan struct{}, 1) - mu <- struct{}{} - c := &Conn{ - isServer: isServer, - br: br, - conn: conn, - mu: mu, - readFinal: true, - writeBuf: writeBuf, - writePool: writeBufferPool, - writeBufSize: writeBufferSize, - enableWriteCompression: true, - compressionLevel: defaultCompressionLevel, - } - c.SetCloseHandler(nil) - c.SetPingHandler(nil) - c.SetPongHandler(nil) - return c -} - -// setReadRemaining tracks the number of bytes remaining on the connection. If n -// overflows, an ErrReadLimit is returned. -func (c *Conn) setReadRemaining(n int64) error { - if n < 0 { - return ErrReadLimit - } - - c.readRemaining = n - return nil -} - -// Subprotocol returns the negotiated protocol for the connection. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -// Close closes the underlying network connection without sending or waiting -// for a close message. -func (c *Conn) Close() error { - return c.conn.Close() -} - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. 
-func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// Write methods - -func (c *Conn) writeFatal(err error) error { - err = hideTempErr(err) - c.writeErrMu.Lock() - if c.writeErr == nil { - c.writeErr = err - } - c.writeErrMu.Unlock() - return err -} - -func (c *Conn) read(n int) ([]byte, error) { - p, err := c.br.Peek(n) - if err == io.EOF { - err = errUnexpectedEOF - } - if _, err := c.br.Discard(len(p)); err != nil { - return p, err - } - return p, err -} - -func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { - <-c.mu - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } - if len(buf1) == 0 { - _, err = c.conn.Write(buf0) - } else { - err = c.writeBufs(buf0, buf1) - } - if err != nil { - return c.writeFatal(err) - } - if frameType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) - } - return nil -} - -func (c *Conn) writeBufs(bufs ...[]byte) error { - b := net.Buffers(bufs) - _, err := b.WriteTo(c.conn) - return err -} - -// WriteControl writes a control message with the given deadline. The allowed -// message types are CloseMessage, PingMessage and PongMessage. -func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { - if !isControl(messageType) { - return errBadWriteOpCode - } - if len(data) > maxControlFramePayloadSize { - return errInvalidControlFrame - } - - b0 := byte(messageType) | finalBit - b1 := byte(len(data)) - if !c.isServer { - b1 |= maskBit - } - - buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) - buf = append(buf, b0, b1) - - if c.isServer { - buf = append(buf, data...) - } else { - key := newMaskKey() - buf = append(buf, key[:]...) - buf = append(buf, data...) - maskBytes(key, 0, buf[6:]) - } - - d := 1000 * time.Hour - if !deadline.IsZero() { - d = time.Until(deadline) - if d < 0 { - return errWriteTimeout - } - } - - timer := time.NewTimer(d) - select { - case <-c.mu: - timer.Stop() - case <-timer.C: - return errWriteTimeout - } - defer func() { c.mu <- struct{}{} }() - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - if err := c.conn.SetWriteDeadline(deadline); err != nil { - return c.writeFatal(err) - } - _, err = c.conn.Write(buf) - if err != nil { - return c.writeFatal(err) - } - if messageType == CloseMessage { - _ = c.writeFatal(ErrCloseSent) - } - return err -} - -// beginMessage prepares a connection and message writer for a new message. -func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { - // Close previous writer if not already closed by the application. It's - // probably better to return an error in this situation, but we cannot - // change this without breaking existing applications. 
- if c.writer != nil { - if err := c.writer.Close(); err != nil { - log.Printf("websocket: discarding writer close error: %v", err) - } - c.writer = nil - } - - if !isControl(messageType) && !isData(messageType) { - return errBadWriteOpCode - } - - c.writeErrMu.Lock() - err := c.writeErr - c.writeErrMu.Unlock() - if err != nil { - return err - } - - mw.c = c - mw.frameType = messageType - mw.pos = maxFrameHeaderSize - - if c.writeBuf == nil { - wpd, ok := c.writePool.Get().(writePoolData) - if ok { - c.writeBuf = wpd.buf - } else { - c.writeBuf = make([]byte, c.writeBufSize) - } - } - return nil -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -// -// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and -// PongMessage) are supported. -func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return nil, err - } - c.writer = &mw - if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - w := c.newCompressionWriter(c.writer, c.compressionLevel) - mw.compress = true - c.writer = w - } - return c.writer, nil -} - -type messageWriter struct { - c *Conn - compress bool // whether next call to flushFrame should set RSV1 - pos int // end of data in writeBuf. - frameType int // type of the current frame. - err error -} - -func (w *messageWriter) endMessage(err error) error { - if w.err != nil { - return err - } - c := w.c - w.err = err - c.writer = nil - if c.writePool != nil { - c.writePool.Put(writePoolData{buf: c.writeBuf}) - c.writeBuf = nil - } - return err -} - -// flushFrame writes buffered data and extra as a frame to the network. The -// final argument indicates that this is the last frame in the message. -func (w *messageWriter) flushFrame(final bool, extra []byte) error { - c := w.c - length := w.pos - maxFrameHeaderSize + len(extra) - - // Check for invalid control frames. - if isControl(w.frameType) && - (!final || length > maxControlFramePayloadSize) { - return w.endMessage(errInvalidControlFrame) - } - - b0 := byte(w.frameType) - if final { - b0 |= finalBit - } - if w.compress { - b0 |= rsv1Bit - } - w.compress = false - - b1 := byte(0) - if !c.isServer { - b1 |= maskBit - } - - // Assume that the frame starts at beginning of c.writeBuf. - framePos := 0 - if c.isServer { - // Adjust up if mask not included in the header. - framePos = 4 - } - - switch { - case length >= 65536: - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 127 - binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) - case length > 125: - framePos += 6 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | 126 - binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) - default: - framePos += 8 - c.writeBuf[framePos] = b0 - c.writeBuf[framePos+1] = b1 | byte(length) - } - - if !c.isServer { - key := newMaskKey() - copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) - if len(extra) > 0 { - return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) - } - } - - // Write the buffers to the connection with best-effort detection of - // concurrent writes. 
See the concurrency section in the package - // documentation for more info. - - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - - err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) - - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - - if err != nil { - return w.endMessage(err) - } - - if final { - _ = w.endMessage(errWriteClosed) - return nil - } - - // Setup for next frame. - w.pos = maxFrameHeaderSize - w.frameType = continuationFrame - return nil -} - -func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.pos - if n <= 0 { - if err := w.flushFrame(false, nil); err != nil { - return 0, err - } - n = len(w.c.writeBuf) - w.pos - } - if n > max { - n = max - } - return n, nil -} - -func (w *messageWriter) Write(p []byte) (int, error) { - if w.err != nil { - return 0, w.err - } - - if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { - // Don't buffer large messages. - err := w.flushFrame(false, p) - if err != nil { - return 0, err - } - return len(p), nil - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) WriteString(p string) (int, error) { - if w.err != nil { - return 0, w.err - } - - nn := len(p) - for len(p) > 0 { - n, err := w.ncopy(len(p)) - if err != nil { - return 0, err - } - copy(w.c.writeBuf[w.pos:], p[:n]) - w.pos += n - p = p[n:] - } - return nn, nil -} - -func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if w.err != nil { - return 0, w.err - } - for { - if w.pos == len(w.c.writeBuf) { - err = w.flushFrame(false, nil) - if err != nil { - break - } - } - var n int - n, err = r.Read(w.c.writeBuf[w.pos:]) - w.pos += n - nn += int64(n) - if err != nil { - if err == io.EOF { - err = nil - } - break - } - } - return nn, err -} - -func (w *messageWriter) Close() error { - if w.err != nil { - return w.err - } - return w.flushFrame(true, nil) -} - -// WritePreparedMessage writes prepared message into connection. -func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { - frameType, frameData, err := pm.frame(prepareKey{ - isServer: c.isServer, - compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), - compressionLevel: c.compressionLevel, - }) - if err != nil { - return err - } - if c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = true - err = c.write(frameType, c.writeDeadline, frameData, nil) - if !c.isWriting { - panic("concurrent write to websocket connection") - } - c.isWriting = false - return err -} - -// WriteMessage is a helper method for getting a writer using NextWriter, -// writing the message and closing the writer. -func (c *Conn) WriteMessage(messageType int, data []byte) error { - - if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { - // Fast path with no allocations and single frame. 
- - var mw messageWriter - if err := c.beginMessage(&mw, messageType); err != nil { - return err - } - n := copy(c.writeBuf[mw.pos:], data) - mw.pos += n - data = data[n:] - return mw.flushFrame(true, data) - } - - w, err := c.NextWriter(messageType) - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - return w.Close() -} - -// SetWriteDeadline sets the write deadline on the underlying network -// connection. After a write has timed out, the websocket state is corrupt and -// all future writes will return an error. A zero value for t means writes will -// not time out. -func (c *Conn) SetWriteDeadline(t time.Time) error { - c.writeDeadline = t - return nil -} - -// Read methods - -func (c *Conn) advanceFrame() (int, error) { - // 1. Skip remainder of previous frame. - - if c.readRemaining > 0 { - if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { - return noFrame, err - } - } - - // 2. Read and parse first two bytes of frame header. - // To aid debugging, collect and report all errors in the first two bytes - // of the header. - - var errors []string - - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - frameType := int(p[0] & 0xf) - final := p[0]&finalBit != 0 - rsv1 := p[0]&rsv1Bit != 0 - rsv2 := p[0]&rsv2Bit != 0 - rsv3 := p[0]&rsv3Bit != 0 - mask := p[1]&maskBit != 0 - if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { - return noFrame, err - } - - c.readDecompress = false - if rsv1 { - if c.newDecompressionReader != nil { - c.readDecompress = true - } else { - errors = append(errors, "RSV1 set") - } - } - - if rsv2 { - errors = append(errors, "RSV2 set") - } - - if rsv3 { - errors = append(errors, "RSV3 set") - } - - switch frameType { - case CloseMessage, PingMessage, PongMessage: - if c.readRemaining > maxControlFramePayloadSize { - errors = append(errors, "len > 125 for control") - } - if !final { - errors = append(errors, "FIN not set on control") - } - case TextMessage, BinaryMessage: - if !c.readFinal { - errors = append(errors, "data before FIN") - } - c.readFinal = final - case continuationFrame: - if c.readFinal { - errors = append(errors, "continuation after FIN") - } - c.readFinal = final - default: - errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) - } - - if mask != c.isServer { - errors = append(errors, "bad MASK") - } - - if len(errors) > 0 { - return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) - } - - // 3. Read and parse frame length as per - // https://tools.ietf.org/html/rfc6455#section-5.2 - // - // The length of the "Payload data", in bytes: if 0-125, that is the payload - // length. - // - If 126, the following 2 bytes interpreted as a 16-bit unsigned - // integer are the payload length. - // - If 127, the following 8 bytes interpreted as - // a 64-bit unsigned integer (the most significant bit MUST be 0) are the - // payload length. Multibyte length quantities are expressed in network byte - // order. - - switch c.readRemaining { - case 126: - p, err := c.read(2) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { - return noFrame, err - } - case 127: - p, err := c.read(8) - if err != nil { - return noFrame, err - } - - if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { - return noFrame, err - } - } - - // 4. Handle frame masking. 
- - if mask { - c.readMaskPos = 0 - p, err := c.read(len(c.readMaskKey)) - if err != nil { - return noFrame, err - } - copy(c.readMaskKey[:], p) - } - - // 5. For text and binary messages, enforce read limit and return. - - if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { - - c.readLength += c.readRemaining - // Don't allow readLength to overflow in the presence of a large readRemaining - // counter. - if c.readLength < 0 { - return noFrame, ErrReadLimit - } - - if c.readLimit > 0 && c.readLength > c.readLimit { - if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { - return noFrame, err - } - return noFrame, ErrReadLimit - } - - return frameType, nil - } - - // 6. Read control frame payload. - - var payload []byte - if c.readRemaining > 0 { - payload, err = c.read(int(c.readRemaining)) - if err := c.setReadRemaining(0); err != nil { - return noFrame, err - } - if err != nil { - return noFrame, err - } - if c.isServer { - maskBytes(c.readMaskKey, 0, payload) - } - } - - // 7. Process control frame payload. - - switch frameType { - case PongMessage: - if err := c.handlePong(string(payload)); err != nil { - return noFrame, err - } - case PingMessage: - if err := c.handlePing(string(payload)); err != nil { - return noFrame, err - } - case CloseMessage: - closeCode := CloseNoStatusReceived - closeText := "" - if len(payload) >= 2 { - closeCode = int(binary.BigEndian.Uint16(payload)) - if !isValidReceivedCloseCode(closeCode) { - return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) - } - closeText = string(payload[2:]) - if !utf8.ValidString(closeText) { - return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") - } - } - if err := c.handleClose(closeCode, closeText); err != nil { - return noFrame, err - } - return noFrame, &CloseError{Code: closeCode, Text: closeText} - } - - return frameType, nil -} - -func (c *Conn) handleProtocolError(message string) error { - data := FormatCloseMessage(CloseProtocolError, message) - if len(data) > maxControlFramePayloadSize { - data = data[:maxControlFramePayloadSize] - } - if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { - return err - } - return errors.New("websocket: " + message) -} - -// NextReader returns the next data message received from the peer. The -// returned messageType is either TextMessage or BinaryMessage. -// -// There can be at most one open reader on a connection. NextReader discards -// the previous message if the application has not already consumed it. -// -// Applications must break out of the application's read loop when this method -// returns a non-nil error value. Errors returned from this method are -// permanent. Once this method returns a non-nil error, all subsequent calls to -// this method return the same error. -func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { - // Close previous reader, only relevant for decompression. 
- if c.reader != nil { - if err := c.reader.Close(); err != nil { - log.Printf("websocket: discarding reader close error: %v", err) - } - c.reader = nil - } - - c.messageReader = nil - c.readLength = 0 - - for c.readErr == nil { - frameType, err := c.advanceFrame() - if err != nil { - c.readErr = hideTempErr(err) - break - } - - if frameType == TextMessage || frameType == BinaryMessage { - c.messageReader = &messageReader{c} - c.reader = c.messageReader - if c.readDecompress { - c.reader = c.newDecompressionReader(c.reader) - } - return frameType, c.reader, nil - } - } - - // Applications that do handle the error returned from this method spin in - // tight loop on connection failure. To help application developers detect - // this error, panic on repeated reads to the failed connection. - c.readErrCount++ - if c.readErrCount >= 1000 { - panic("repeated read on failed websocket connection") - } - - return noFrame, nil, c.readErr -} - -type messageReader struct{ c *Conn } - -func (r *messageReader) Read(b []byte) (int, error) { - c := r.c - if c.messageReader != r { - return 0, io.EOF - } - - for c.readErr == nil { - - if c.readRemaining > 0 { - if int64(len(b)) > c.readRemaining { - b = b[:c.readRemaining] - } - n, err := c.br.Read(b) - c.readErr = hideTempErr(err) - if c.isServer { - c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) - } - rem := c.readRemaining - rem -= int64(n) - if err := c.setReadRemaining(rem); err != nil { - return 0, err - } - if c.readRemaining > 0 && c.readErr == io.EOF { - c.readErr = errUnexpectedEOF - } - return n, c.readErr - } - - if c.readFinal { - c.messageReader = nil - return 0, io.EOF - } - - frameType, err := c.advanceFrame() - switch { - case err != nil: - c.readErr = hideTempErr(err) - case frameType == TextMessage || frameType == BinaryMessage: - c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") - } - } - - err := c.readErr - if err == io.EOF && c.messageReader == r { - err = errUnexpectedEOF - } - return 0, err -} - -func (r *messageReader) Close() error { - return nil -} - -// ReadMessage is a helper method for getting a reader using NextReader and -// reading from that reader to a buffer. -func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { - var r io.Reader - messageType, r, err = c.NextReader() - if err != nil { - return messageType, nil, err - } - p, err = io.ReadAll(r) - return messageType, p, err -} - -// SetReadDeadline sets the read deadline on the underlying network connection. -// After a read has timed out, the websocket connection state is corrupt and -// all future reads will return an error. A zero value for t means reads will -// not time out. -func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a -// message exceeds the limit, the connection sends a close message to the peer -// and returns ErrReadLimit to the application. -func (c *Conn) SetReadLimit(limit int64) { - c.readLimit = limit -} - -// CloseHandler returns the current close handler -func (c *Conn) CloseHandler() func(code int, text string) error { - return c.handleClose -} - -// SetCloseHandler sets the handler for close messages received from the peer. -// The code argument to h is the received close code or CloseNoStatusReceived -// if the close message is empty. The default close handler sends a close -// message back to the peer. 
-// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// close messages as described in the section on Control Messages above. -// -// The connection read methods return a CloseError when a close message is -// received. Most applications should handle close messages as part of their -// normal error handling. Applications should only set a close handler when the -// application must perform some action before sending a close message back to -// the peer. -func (c *Conn) SetCloseHandler(h func(code int, text string) error) { - if h == nil { - h = func(code int, text string) error { - message := FormatCloseMessage(code, "") - if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { - return err - } - return nil - } - } - c.handleClose = h -} - -// PingHandler returns the current ping handler -func (c *Conn) PingHandler() func(appData string) error { - return c.handlePing -} - -// SetPingHandler sets the handler for ping messages received from the peer. -// The appData argument to h is the PING message application data. The default -// ping handler sends a pong to the peer. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// ping messages as described in the section on Control Messages above. -func (c *Conn) SetPingHandler(h func(appData string) error) { - if h == nil { - h = func(message string) error { - err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - if err == ErrCloseSent { - return nil - } else if _, ok := err.(net.Error); ok { - return nil - } - return err - } - } - c.handlePing = h -} - -// PongHandler returns the current pong handler -func (c *Conn) PongHandler() func(appData string) error { - return c.handlePong -} - -// SetPongHandler sets the handler for pong messages received from the peer. -// The appData argument to h is the PONG message application data. The default -// pong handler does nothing. -// -// The handler function is called from the NextReader, ReadMessage and message -// reader Read methods. The application must read the connection to process -// pong messages as described in the section on Control Messages above. -func (c *Conn) SetPongHandler(h func(appData string) error) { - if h == nil { - h = func(string) error { return nil } - } - c.handlePong = h -} - -// NetConn returns the underlying connection that is wrapped by c. -// Note that writing to or reading from this connection directly will corrupt the -// WebSocket connection. -func (c *Conn) NetConn() net.Conn { - return c.conn -} - -// UnderlyingConn returns the internal net.Conn. This can be used to further -// modifications to connection specific flags. -// Deprecated: Use the NetConn method. -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -// EnableWriteCompression enables and disables write compression of -// subsequent text and binary messages. This function is a noop if -// compression was not negotiated with the peer. -func (c *Conn) EnableWriteCompression(enable bool) { - c.enableWriteCompression = enable -} - -// SetCompressionLevel sets the flate compression level for subsequent text and -// binary messages. This function is a noop if compression was not negotiated -// with the peer. See the compress/flate package for a description of -// compression levels. 
-func (c *Conn) SetCompressionLevel(level int) error { - if !isValidCompressionLevel(level) { - return errors.New("websocket: invalid compression level") - } - c.compressionLevel = level - return nil -} - -// FormatCloseMessage formats closeCode and text as a WebSocket close message. -// An empty message is returned for code CloseNoStatusReceived. -func FormatCloseMessage(closeCode int, text string) []byte { - if closeCode == CloseNoStatusReceived { - // Return empty message because it's illegal to send - // CloseNoStatusReceived. Return non-nil value in case application - // checks for nil. - return []byte{} - } - buf := make([]byte, 2+len(text)) - binary.BigEndian.PutUint16(buf, uint16(closeCode)) - copy(buf[2:], text) - return buf -} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go deleted file mode 100644 index 8db0cef95a2..00000000000 --- a/vendor/github.com/gorilla/websocket/doc.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package websocket implements the WebSocket protocol defined in RFC 6455. -// -// Overview -// -// The Conn type represents a WebSocket connection. A server application calls -// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: -// -// var upgrader = websocket.Upgrader{ -// ReadBufferSize: 1024, -// WriteBufferSize: 1024, -// } -// -// func handler(w http.ResponseWriter, r *http.Request) { -// conn, err := upgrader.Upgrade(w, r, nil) -// if err != nil { -// log.Println(err) -// return -// } -// ... Use conn to send and receive messages. -// } -// -// Call the connection's WriteMessage and ReadMessage methods to send and -// receive messages as a slice of bytes. This snippet of code shows how to echo -// messages using these methods: -// -// for { -// messageType, p, err := conn.ReadMessage() -// if err != nil { -// log.Println(err) -// return -// } -// if err := conn.WriteMessage(messageType, p); err != nil { -// log.Println(err) -// return -// } -// } -// -// In above snippet of code, p is a []byte and messageType is an int with value -// websocket.BinaryMessage or websocket.TextMessage. -// -// An application can also send and receive messages using the io.WriteCloser -// and io.Reader interfaces. To send a message, call the connection NextWriter -// method to get an io.WriteCloser, write the message to the writer and close -// the writer when done. To receive a message, call the connection NextReader -// method to get an io.Reader and read until io.EOF is returned. This snippet -// shows how to echo messages using the NextWriter and NextReader methods: -// -// for { -// messageType, r, err := conn.NextReader() -// if err != nil { -// return -// } -// w, err := conn.NextWriter(messageType) -// if err != nil { -// return err -// } -// if _, err := io.Copy(w, r); err != nil { -// return err -// } -// if err := w.Close(); err != nil { -// return err -// } -// } -// -// Data Messages -// -// The WebSocket protocol distinguishes between text and binary data messages. -// Text messages are interpreted as UTF-8 encoded text. The interpretation of -// binary messages is left to the application. -// -// This package uses the TextMessage and BinaryMessage integer constants to -// identify the two data message types. The ReadMessage and NextReader methods -// return the type of the received message. 
The messageType argument to the -// WriteMessage and NextWriter methods specifies the type of a sent message. -// -// It is the application's responsibility to ensure that text messages are -// valid UTF-8 encoded text. -// -// Control Messages -// -// The WebSocket protocol defines three types of control messages: close, ping -// and pong. Call the connection WriteControl, WriteMessage or NextWriter -// methods to send a control message to the peer. -// -// Connections handle received close messages by calling the handler function -// set with the SetCloseHandler method and by returning a *CloseError from the -// NextReader, ReadMessage or the message Read method. The default close -// handler sends a close message to the peer. -// -// Connections handle received ping messages by calling the handler function -// set with the SetPingHandler method. The default ping handler sends a pong -// message to the peer. -// -// Connections handle received pong messages by calling the handler function -// set with the SetPongHandler method. The default pong handler does nothing. -// If an application sends ping messages, then the application should set a -// pong handler to receive the corresponding pong. -// -// The control message handler functions are called from the NextReader, -// ReadMessage and message reader Read methods. The default close and ping -// handlers can block these methods for a short time when the handler writes to -// the connection. -// -// The application must read the connection to process close, ping and pong -// messages sent from the peer. If the application is not otherwise interested -// in messages from the peer, then the application should start a goroutine to -// read and discard messages from the peer. A simple example is: -// -// func readLoop(c *websocket.Conn) { -// for { -// if _, _, err := c.NextReader(); err != nil { -// c.Close() -// break -// } -// } -// } -// -// Concurrency -// -// Connections support one concurrent reader and one concurrent writer. -// -// Applications are responsible for ensuring that no more than one goroutine -// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and -// that no more than one goroutine calls the read methods (NextReader, -// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) -// concurrently. -// -// The Close and WriteControl methods can be called concurrently with all other -// methods. -// -// Origin Considerations -// -// Web browsers allow Javascript applications to open a WebSocket connection to -// any host. It's up to the server to enforce an origin policy using the Origin -// request header sent by the browser. -// -// The Upgrader calls the function specified in the CheckOrigin field to check -// the origin. If the CheckOrigin function returns false, then the Upgrade -// method fails the WebSocket handshake with HTTP status 403. -// -// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail -// the handshake if the Origin request header is present and the Origin host is -// not equal to the Host request header. -// -// The deprecated package-level Upgrade function does not perform origin -// checking. The application is responsible for checking the Origin header -// before calling the Upgrade function. -// -// Buffers -// -// Connections buffer network input and output to reduce the number -// of system calls when reading or writing messages. 
-// -// Write buffers are also used for constructing WebSocket frames. See RFC 6455, -// Section 5 for a discussion of message framing. A WebSocket frame header is -// written to the network each time a write buffer is flushed to the network. -// Decreasing the size of the write buffer can increase the amount of framing -// overhead on the connection. -// -// The buffer sizes in bytes are specified by the ReadBufferSize and -// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default -// size of 4096 when a buffer size field is set to zero. The Upgrader reuses -// buffers created by the HTTP server when a buffer size field is set to zero. -// The HTTP server buffers have a size of 4096 at the time of this writing. -// -// The buffer sizes do not limit the size of a message that can be read or -// written by a connection. -// -// Buffers are held for the lifetime of the connection by default. If the -// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the -// write buffer only when writing a message. -// -// Applications should tune the buffer sizes to balance memory use and -// performance. Increasing the buffer size uses more memory, but can reduce the -// number of system calls to read or write the network. In the case of writing, -// increasing the buffer size can reduce the number of frame headers written to -// the network. -// -// Some guidelines for setting buffer parameters are: -// -// Limit the buffer sizes to the maximum expected message size. Buffers larger -// than the largest message do not provide any benefit. -// -// Depending on the distribution of message sizes, setting the buffer size to -// a value less than the maximum expected message size can greatly reduce memory -// use with a small impact on performance. Here's an example: If 99% of the -// messages are smaller than 256 bytes and the maximum message size is 512 -// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls -// than a buffer size of 512 bytes. The memory savings is 50%. -// -// A write buffer pool is useful when the application has a modest number -// writes over a large number of connections. when buffers are pooled, a larger -// buffer size has a reduced impact on total memory use and has the benefit of -// reducing system calls and frame overhead. -// -// Compression EXPERIMENTAL -// -// Per message compression extensions (RFC 7692) are experimentally supported -// by this package in a limited capacity. Setting the EnableCompression option -// to true in Dialer or Upgrader will attempt to negotiate per message deflate -// support. -// -// var upgrader = websocket.Upgrader{ -// EnableCompression: true, -// } -// -// If compression was successfully negotiated with the connection's peer, any -// message received in compressed form will be automatically decompressed. -// All Read methods will return uncompressed bytes. -// -// Per message compression of messages written to a connection can be enabled -// or disabled by calling the corresponding Conn method: -// -// conn.EnableWriteCompression(false) -// -// Currently this package does not support compression with "context takeover". -// This means that messages must be compressed and decompressed in isolation, -// without retaining sliding window or dictionary state across messages. For -// more details refer to RFC 7692. -// -// Use of compression is experimental and may result in decreased performance. 
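A minimal sketch tying together the pieces the package comment above describes (upgrading, the read loop, close-code checks and compression negotiation). The handler name, route, listen address and read limit are illustrative assumptions; only the gorilla/websocket identifiers themselves come from the API documented above.

	package main

	import (
		"log"
		"net/http"

		"github.com/gorilla/websocket"
	)

	var upgrader = websocket.Upgrader{
		ReadBufferSize:    1024,
		WriteBufferSize:   1024,
		EnableCompression: true, // a request, not a guarantee; see the Compression section above
	}

	func echoHandler(w http.ResponseWriter, r *http.Request) {
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			log.Println("upgrade:", err)
			return
		}
		defer conn.Close()

		// Bound incoming message size before entering the read loop.
		conn.SetReadLimit(1 << 20)

		for {
			mt, msg, err := conn.ReadMessage()
			if err != nil {
				// Received close frames surface here as *CloseError values.
				if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
					log.Println("read:", err)
				}
				return
			}
			if err := conn.WriteMessage(mt, msg); err != nil {
				log.Println("write:", err)
				return
			}
		}
	}

	func main() {
		http.HandleFunc("/echo", echoHandler)
		log.Fatal(http.ListenAndServe("localhost:8080", nil))
	}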
-package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go deleted file mode 100644 index c64f8c82901..00000000000 --- a/vendor/github.com/gorilla/websocket/join.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "io" - "strings" -) - -// JoinMessages concatenates received messages to create a single io.Reader. -// The string term is appended to each message. The returned reader does not -// support concurrent calls to the Read method. -func JoinMessages(c *Conn, term string) io.Reader { - return &joinReader{c: c, term: term} -} - -type joinReader struct { - c *Conn - term string - r io.Reader -} - -func (r *joinReader) Read(p []byte) (int, error) { - if r.r == nil { - var err error - _, r.r, err = r.c.NextReader() - if err != nil { - return 0, err - } - if r.term != "" { - r.r = io.MultiReader(r.r, strings.NewReader(r.term)) - } - } - n, err := r.r.Read(p) - if err == io.EOF { - err = nil - r.r = nil - } - return n, err -} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go deleted file mode 100644 index dc2c1f6415f..00000000000 --- a/vendor/github.com/gorilla/websocket/json.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "encoding/json" - "io" -) - -// WriteJSON writes the JSON encoding of v as a message. -// -// Deprecated: Use c.WriteJSON instead. -func WriteJSON(c *Conn, v interface{}) error { - return c.WriteJSON(v) -} - -// WriteJSON writes the JSON encoding of v as a message. -// -// See the documentation for encoding/json Marshal for details about the -// conversion of Go values to JSON. -func (c *Conn) WriteJSON(v interface{}) error { - w, err := c.NextWriter(TextMessage) - if err != nil { - return err - } - err1 := json.NewEncoder(w).Encode(v) - err2 := w.Close() - if err1 != nil { - return err1 - } - return err2 -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// Deprecated: Use c.ReadJSON instead. -func ReadJSON(c *Conn, v interface{}) error { - return c.ReadJSON(v) -} - -// ReadJSON reads the next JSON-encoded message from the connection and stores -// it in the value pointed to by v. -// -// See the documentation for the encoding/json Unmarshal function for details -// about the conversion of JSON to a Go value. -func (c *Conn) ReadJSON(v interface{}) error { - _, r, err := c.NextReader() - if err != nil { - return err - } - err = json.NewDecoder(r).Decode(v) - if err == io.EOF { - // One value is expected in the message. - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go deleted file mode 100644 index 67d0968be83..00000000000 --- a/vendor/github.com/gorilla/websocket/mask.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. 
- -//go:build !appengine -// +build !appengine - -package websocket - -import "unsafe" - -// #nosec G103 -- (CWE-242) Has been audited -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -func maskBytes(key [4]byte, pos int, b []byte) int { - // Mask one byte at a time for small buffers. - if len(b) < 2*wordSize { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 - } - - // Mask one byte at a time to word boundary. - //#nosec G103 -- (CWE-242) Has been audited - if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { - n = wordSize - n - for i := range b[:n] { - b[i] ^= key[pos&3] - pos++ - } - b = b[n:] - } - - // Create aligned word size key. - var k [wordSize]byte - for i := range k { - k[i] = key[(pos+i)&3] - } - //#nosec G103 -- (CWE-242) Has been audited - kw := *(*uintptr)(unsafe.Pointer(&k)) - - // Mask one word at a time. - n := (len(b) / wordSize) * wordSize - for i := 0; i < n; i += wordSize { - //#nosec G103 -- (CWE-242) Has been audited - *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw - } - - // Mask one byte at a time for remaining bytes. - b = b[n:] - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go deleted file mode 100644 index 36250ca7c47..00000000000 --- a/vendor/github.com/gorilla/websocket/mask_safe.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of -// this source code is governed by a BSD-style license that can be found in the -// LICENSE file. - -//go:build appengine -// +build appengine - -package websocket - -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go deleted file mode 100644 index c854225e967..00000000000 --- a/vendor/github.com/gorilla/websocket/prepared.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bytes" - "net" - "sync" - "time" -) - -// PreparedMessage caches on the wire representations of a message payload. -// Use PreparedMessage to efficiently send a message payload to multiple -// connections. PreparedMessage is especially useful when compression is used -// because the CPU and memory expensive compression operation can be executed -// once for a given set of compression options. -type PreparedMessage struct { - messageType int - data []byte - mu sync.Mutex - frames map[prepareKey]*preparedFrame -} - -// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. -type prepareKey struct { - isServer bool - compress bool - compressionLevel int -} - -// preparedFrame contains data in wire representation. -type preparedFrame struct { - once sync.Once - data []byte -} - -// NewPreparedMessage returns an initialized PreparedMessage. You can then send -// it to connection using WritePreparedMessage method. Valid wire -// representation will be calculated lazily only once for a set of current -// connection options. 
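The comment above frames PreparedMessage as a way to pay the framing (and optional compression) cost once when the same payload is sent to many connections. A short hedged sketch of that broadcast pattern follows; the package name, the conns slice and the payload argument are assumptions made only for illustration.

	package wsexample

	import (
		"log"

		"github.com/gorilla/websocket"
	)

	// broadcast prepares the wire frame for payload once and reuses it for
	// every connection, instead of re-encoding it per WriteMessage call.
	func broadcast(conns []*websocket.Conn, payload []byte) {
		pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
		if err != nil {
			log.Println("prepare:", err)
			return
		}
		for _, c := range conns {
			if err := c.WritePreparedMessage(pm); err != nil {
				log.Println("write:", err)
			}
		}
	}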
-func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { - pm := &PreparedMessage{ - messageType: messageType, - frames: make(map[prepareKey]*preparedFrame), - data: data, - } - - // Prepare a plain server frame. - _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) - if err != nil { - return nil, err - } - - // To protect against caller modifying the data argument, remember the data - // copied to the plain server frame. - pm.data = frameData[len(frameData)-len(data):] - return pm, nil -} - -func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { - pm.mu.Lock() - frame, ok := pm.frames[key] - if !ok { - frame = &preparedFrame{} - pm.frames[key] = frame - } - pm.mu.Unlock() - - var err error - frame.once.Do(func() { - // Prepare a frame using a 'fake' connection. - // TODO: Refactor code in conn.go to allow more direct construction of - // the frame. - mu := make(chan struct{}, 1) - mu <- struct{}{} - var nc prepareConn - c := &Conn{ - conn: &nc, - mu: mu, - isServer: key.isServer, - compressionLevel: key.compressionLevel, - enableWriteCompression: true, - writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), - } - if key.compress { - c.newCompressionWriter = compressNoContextTakeover - } - err = c.WriteMessage(pm.messageType, pm.data) - frame.data = nc.buf.Bytes() - }) - return pm.messageType, frame.data, err -} - -type prepareConn struct { - buf bytes.Buffer - net.Conn -} - -func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } -func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go deleted file mode 100644 index 80f55d1eacc..00000000000 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "bufio" - "encoding/base64" - "errors" - "log" - "net" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/proxy" -) - -type netDialerFunc func(network, addr string) (net.Conn, error) - -func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { - return fn(network, addr) -} - -func init() { - proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { - return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil - }) -} - -type httpProxyDialer struct { - proxyURL *url.URL - forwardDial func(network, addr string) (net.Conn, error) -} - -func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { - hostPort, _ := hostPortNoPort(hpd.proxyURL) - conn, err := hpd.forwardDial(network, hostPort) - if err != nil { - return nil, err - } - - connectHeader := make(http.Header) - if user := hpd.proxyURL.User; user != nil { - proxyUser := user.Username() - if proxyPassword, passwordSet := user.Password(); passwordSet { - credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) - connectHeader.Set("Proxy-Authorization", "Basic "+credential) - } - } - - connectReq := &http.Request{ - Method: http.MethodConnect, - URL: &url.URL{Opaque: addr}, - Host: addr, - Header: connectHeader, - } - - if err := connectReq.Write(conn); err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } - return nil, err - } - - // Read response. It's OK to use and discard buffered reader here becaue - // the remote server does not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := http.ReadResponse(br, connectReq) - if err != nil { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } - return nil, err - } - - if resp.StatusCode != 200 { - if err := conn.Close(); err != nil { - log.Printf("httpProxyDialer: failed to close connection: %v", err) - } - f := strings.SplitN(resp.Status, " ", 2) - return nil, errors.New(f[1]) - } - return conn, nil -} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go deleted file mode 100644 index 1e720e1da47..00000000000 --- a/vendor/github.com/gorilla/websocket/server.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package websocket - -import ( - "bufio" - "errors" - "io" - "log" - "net/http" - "net/url" - "strings" - "time" -) - -// HandshakeError describes an error with the handshake from the peer. -type HandshakeError struct { - message string -} - -func (e HandshakeError) Error() string { return e.message } - -// Upgrader specifies parameters for upgrading an HTTP connection to a -// WebSocket connection. -// -// It is safe to call Upgrader's methods concurrently. -type Upgrader struct { - // HandshakeTimeout specifies the duration for the handshake to complete. - HandshakeTimeout time.Duration - - // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer - // size is zero, then buffers allocated by the HTTP server are used. The - // I/O buffer sizes do not limit the size of the messages that can be sent - // or received. - ReadBufferSize, WriteBufferSize int - - // WriteBufferPool is a pool of buffers for write operations. 
If the value - // is not set, then write buffers are allocated to the connection for the - // lifetime of the connection. - // - // A pool is most useful when the application has a modest volume of writes - // across a large number of connections. - // - // Applications should use a single pool for each unique value of - // WriteBufferSize. - WriteBufferPool BufferPool - - // Subprotocols specifies the server's supported protocols in order of - // preference. If this field is not nil, then the Upgrade method negotiates a - // subprotocol by selecting the first match in this list with a protocol - // requested by the client. If there's no match, then no protocol is - // negotiated (the Sec-Websocket-Protocol header is not included in the - // handshake response). - Subprotocols []string - - // Error specifies the function for generating HTTP error responses. If Error - // is nil, then http.Error is used to generate the HTTP response. - Error func(w http.ResponseWriter, r *http.Request, status int, reason error) - - // CheckOrigin returns true if the request Origin header is acceptable. If - // CheckOrigin is nil, then a safe default is used: return false if the - // Origin request header is present and the origin host is not equal to - // request Host header. - // - // A CheckOrigin function should carefully validate the request origin to - // prevent cross-site request forgery. - CheckOrigin func(r *http.Request) bool - - // EnableCompression specify if the server should attempt to negotiate per - // message compression (RFC 7692). Setting this value to true does not - // guarantee that compression will be supported. Currently only "no context - // takeover" modes are supported. - EnableCompression bool -} - -func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { - err := HandshakeError{reason} - if u.Error != nil { - u.Error(w, r, status, err) - } else { - w.Header().Set("Sec-Websocket-Version", "13") - http.Error(w, http.StatusText(status), status) - } - return nil, err -} - -// checkSameOrigin returns true if the origin is not set or is equal to the request host. -func checkSameOrigin(r *http.Request) bool { - origin := r.Header["Origin"] - if len(origin) == 0 { - return true - } - u, err := url.Parse(origin[0]) - if err != nil { - return false - } - return equalASCIIFold(u.Host, r.Host) -} - -func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { - if u.Subprotocols != nil { - clientProtocols := Subprotocols(r) - for _, serverProtocol := range u.Subprotocols { - for _, clientProtocol := range clientProtocols { - if clientProtocol == serverProtocol { - return clientProtocol - } - } - } - } else if responseHeader != nil { - return responseHeader.Get("Sec-Websocket-Protocol") - } - return "" -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie). To specify -// subprotocols supported by the server, set Upgrader.Subprotocols directly. -// -// If the upgrade fails, then Upgrade replies to the client with an HTTP error -// response. 
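The Upgrader fields documented above (CheckOrigin, Subprotocols and the error handling around Upgrade) are described only in prose, so a small sketch of how they are commonly wired together may help. The accepted origin, subprotocol name, route and handler are invented for illustration and are not defined anywhere in this package.

	package main

	import (
		"log"
		"net/http"

		"github.com/gorilla/websocket"
	)

	var upgrader = websocket.Upgrader{
		// Accept requests with no Origin header (non-browser clients) or
		// from a single known origin; this replaces the same-origin default.
		CheckOrigin: func(r *http.Request) bool {
			origin := r.Header.Get("Origin")
			return origin == "" || origin == "https://app.example.com"
		},
		// The first entry matching a client-requested protocol is selected.
		Subprotocols: []string{"example.v1"},
	}

	func wsHandler(w http.ResponseWriter, r *http.Request) {
		conn, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			// Upgrade has already replied with an HTTP error at this point.
			log.Println("upgrade:", err)
			return
		}
		defer conn.Close()
		log.Println("negotiated subprotocol:", conn.Subprotocol())
	}

	func main() {
		http.HandleFunc("/ws", wsHandler)
		log.Fatal(http.ListenAndServe("localhost:8080", nil))
	}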
-func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - const badHandshake = "websocket: the client is not using the websocket protocol: " - - if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") - } - - if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") - } - - if r.Method != http.MethodGet { - return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") - } - - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") - } - - if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") - } - - checkOrigin := u.CheckOrigin - if checkOrigin == nil { - checkOrigin = checkSameOrigin - } - if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") - } - - challengeKey := r.Header.Get("Sec-Websocket-Key") - if !isValidChallengeKey(challengeKey) { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") - } - - subprotocol := u.selectSubprotocol(r, responseHeader) - - // Negotiate PMCE - var compress bool - if u.EnableCompression { - for _, ext := range parseExtensions(r.Header) { - if ext[""] != "permessage-deflate" { - continue - } - compress = true - break - } - } - - h, ok := w.(http.Hijacker) - if !ok { - return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") - } - var brw *bufio.ReadWriter - netConn, brw, err := h.Hijack() - if err != nil { - return u.returnError(w, r, http.StatusInternalServerError, err.Error()) - } - - if brw.Reader.Buffered() > 0 { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, errors.New("websocket: client sent data before handshake is complete") - } - - var br *bufio.Reader - if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { - // Reuse hijacked buffered reader as connection reader. - br = brw.Reader - } - - buf := bufioWriterBuffer(netConn, brw.Writer) - - var writeBuf []byte - if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { - // Reuse hijacked write buffer as connection buffer. - writeBuf = buf - } - - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) - c.subprotocol = subprotocol - - if compress { - c.newCompressionWriter = compressNoContextTakeover - c.newDecompressionReader = decompressNoContextTakeover - } - - // Use larger of hijacked buffer and connection write buffer for header. - p := buf - if len(c.writeBuf) > len(p) { - p = c.writeBuf - } - p = p[:0] - - p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) - p = append(p, computeAcceptKey(challengeKey)...) - p = append(p, "\r\n"...) 
- if c.subprotocol != "" { - p = append(p, "Sec-WebSocket-Protocol: "...) - p = append(p, c.subprotocol...) - p = append(p, "\r\n"...) - } - if compress { - p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) - } - for k, vs := range responseHeader { - if k == "Sec-Websocket-Protocol" { - continue - } - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - for i := 0; i < len(v); i++ { - b := v[i] - if b <= 31 { - // prevent response splitting. - b = ' ' - } - p = append(p, b) - } - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) - - // Clear deadlines set by HTTP server. - if err := netConn.SetDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - - if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - } - if _, err = netConn.Write(p); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - if u.HandshakeTimeout > 0 { - if err := netConn.SetWriteDeadline(time.Time{}); err != nil { - if err := netConn.Close(); err != nil { - log.Printf("websocket: failed to close network connection: %v", err) - } - return nil, err - } - } - - return c, nil -} - -// Upgrade upgrades the HTTP server connection to the WebSocket protocol. -// -// Deprecated: Use websocket.Upgrader instead. -// -// Upgrade does not perform origin checking. The application is responsible for -// checking the Origin header before calling Upgrade. An example implementation -// of the same origin policy check is: -// -// if req.Header.Get("Origin") != "http://"+req.Host { -// http.Error(w, "Origin not allowed", http.StatusForbidden) -// return -// } -// -// If the endpoint supports subprotocols, then the application is responsible -// for negotiating the protocol used on the connection. Use the Subprotocols() -// function to get the subprotocols requested by the client. Use the -// Sec-Websocket-Protocol response header to specify the subprotocol selected -// by the application. -// -// The responseHeader is included in the response to the client's upgrade -// request. Use the responseHeader to specify cookies (Set-Cookie) and the -// negotiated subprotocol (Sec-Websocket-Protocol). -// -// The connection buffers IO to the underlying network connection. The -// readBufSize and writeBufSize parameters specify the size of the buffers to -// use. Messages can be larger than the buffers. -// -// If the request is not a valid WebSocket handshake, then Upgrade returns an -// error of type HandshakeError. Applications should handle this error by -// replying to the client with an HTTP error response. 
-func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { - u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} - u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { - // don't return errors to maintain backwards compatibility - } - u.CheckOrigin = func(r *http.Request) bool { - // allow all connections by default - return true - } - return u.Upgrade(w, r, responseHeader) -} - -// Subprotocols returns the subprotocols requested by the client in the -// Sec-Websocket-Protocol header. -func Subprotocols(r *http.Request) []string { - h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) - if h == "" { - return nil - } - protocols := strings.Split(h, ",") - for i := range protocols { - protocols[i] = strings.TrimSpace(protocols[i]) - } - return protocols -} - -// IsWebSocketUpgrade returns true if the client requested upgrade to the -// WebSocket protocol. -func IsWebSocketUpgrade(r *http.Request) bool { - return tokenListContainsValue(r.Header, "Connection", "upgrade") && - tokenListContainsValue(r.Header, "Upgrade", "websocket") -} - -// bufioReaderSize size returns the size of a bufio.Reader. -func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { - // This code assumes that peek on a reset reader returns - // bufio.Reader.buf[:0]. - // TODO: Use bufio.Reader.Size() after Go 1.10 - br.Reset(originalReader) - if p, err := br.Peek(0); err == nil { - return cap(p) - } - return 0 -} - -// writeHook is an io.Writer that records the last slice passed to it vio -// io.Writer.Write. -type writeHook struct { - p []byte -} - -func (wh *writeHook) Write(p []byte) (int, error) { - wh.p = p - return len(p), nil -} - -// bufioWriterBuffer grabs the buffer from a bufio.Writer. -func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { - // This code assumes that bufio.Writer.buf[:1] is passed to the - // bufio.Writer's underlying writer. - var wh writeHook - bw.Reset(&wh) - if err := bw.WriteByte(0); err != nil { - panic(err) - } - if err := bw.Flush(); err != nil { - log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) - } - - bw.Reset(originalWriter) - - return wh.p[:cap(wh.p)] -} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go deleted file mode 100644 index 7f386453481..00000000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ /dev/null @@ -1,18 +0,0 @@ -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.HandshakeContext(ctx); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go deleted file mode 100644 index 9b1a629bff4..00000000000 --- a/vendor/github.com/gorilla/websocket/util.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package websocket - -import ( - "crypto/rand" - "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 - "encoding/base64" - "io" - "net/http" - "strings" - "unicode/utf8" -) - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func computeAcceptKey(challengeKey string) string { - h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 - h.Write([]byte(challengeKey)) - h.Write(keyGUID) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func generateChallengeKey() (string, error) { - p := make([]byte, 16) - if _, err := io.ReadFull(rand.Reader, p); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(p), nil -} - -// Token octets per RFC 2616. -var isTokenOctet = [256]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -// skipSpace returns a slice of the string s with all leading RFC 2616 linear -// whitespace removed. -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if b := s[i]; b != ' ' && b != '\t' { - break - } - } - return s[i:] -} - -// nextToken returns the leading RFC 2616 token of s and the string following -// the token. -func nextToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if !isTokenOctet[s[i]] { - break - } - } - return s[:i], s[i:] -} - -// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 -// and the string following the token or quoted string. -func nextTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return nextToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} - -// equalASCIIFold returns true if s is equal to t with ASCII case folding as -// defined in RFC 4790. 
-func equalASCIIFold(s, t string) bool { - for s != "" && t != "" { - sr, size := utf8.DecodeRuneInString(s) - s = s[size:] - tr, size := utf8.DecodeRuneInString(t) - t = t[size:] - if sr == tr { - continue - } - if 'A' <= sr && sr <= 'Z' { - sr = sr + 'a' - 'A' - } - if 'A' <= tr && tr <= 'Z' { - tr = tr + 'a' - 'A' - } - if sr != tr { - return false - } - } - return s == t -} - -// tokenListContainsValue returns true if the 1#token header with the given -// name contains a token equal to value with ASCII case folding. -func tokenListContainsValue(header http.Header, name string, value string) bool { -headers: - for _, s := range header[name] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - s = skipSpace(s) - if s != "" && s[0] != ',' { - continue headers - } - if equalASCIIFold(t, value) { - return true - } - if s == "" { - continue headers - } - s = s[1:] - } - } - return false -} - -// parseExtensions parses WebSocket extensions from a header. -func parseExtensions(header http.Header) []map[string]string { - // From RFC 6455: - // - // Sec-WebSocket-Extensions = extension-list - // extension-list = 1#extension - // extension = extension-token *( ";" extension-param ) - // extension-token = registered-token - // registered-token = token - // extension-param = token [ "=" (token | quoted-string) ] - // ;When using the quoted-string syntax variant, the value - // ;after quoted-string unescaping MUST conform to the - // ;'token' ABNF. - - var result []map[string]string -headers: - for _, s := range header["Sec-Websocket-Extensions"] { - for { - var t string - t, s = nextToken(skipSpace(s)) - if t == "" { - continue headers - } - ext := map[string]string{"": t} - for { - s = skipSpace(s) - if !strings.HasPrefix(s, ";") { - break - } - var k string - k, s = nextToken(skipSpace(s[1:])) - if k == "" { - continue headers - } - s = skipSpace(s) - var v string - if strings.HasPrefix(s, "=") { - v, s = nextTokenOrQuoted(skipSpace(s[1:])) - s = skipSpace(s) - } - if s != "" && s[0] != ',' && s[0] != ';' { - continue headers - } - ext[k] = v - } - if s != "" && s[0] != ',' { - continue headers - } - result = append(result, ext) - if s == "" { - continue headers - } - s = s[1:] - } - } - return result -} - -// isValidChallengeKey checks if the argument meets RFC6455 specification. -func isValidChallengeKey(s string) bool { - // From RFC6455: - // - // A |Sec-WebSocket-Key| header field with a base64-encoded (see - // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in - // length. - - if s == "" { - return false - } - decoded, err := base64.StdEncoding.DecodeString(s) - return err == nil && len(decoded) == 16 -} diff --git a/vendor/github.com/mxk/go-flowrate/LICENSE b/vendor/github.com/mxk/go-flowrate/LICENSE deleted file mode 100644 index e9f9f628ba5..00000000000 --- a/vendor/github.com/mxk/go-flowrate/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. 
- - * Neither the name of the go-flowrate project nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go deleted file mode 100644 index 1b727721e14..00000000000 --- a/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go +++ /dev/null @@ -1,267 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -// Package flowrate provides the tools for monitoring and limiting the flow rate -// of an arbitrary data stream. -package flowrate - -import ( - "math" - "sync" - "time" -) - -// Monitor monitors and limits the transfer rate of a data stream. -type Monitor struct { - mu sync.Mutex // Mutex guarding access to all internal fields - active bool // Flag indicating an active transfer - start time.Duration // Transfer start time (clock() value) - bytes int64 // Total number of bytes transferred - samples int64 // Total number of samples taken - - rSample float64 // Most recent transfer rate sample (bytes per second) - rEMA float64 // Exponential moving average of rSample - rPeak float64 // Peak transfer rate (max of all rSamples) - rWindow float64 // rEMA window (seconds) - - sBytes int64 // Number of bytes transferred since sLast - sLast time.Duration // Most recent sample time (stop time when inactive) - sRate time.Duration // Sampling rate - - tBytes int64 // Number of bytes expected in the current transfer - tLast time.Duration // Time of the most recent transfer of at least 1 byte -} - -// New creates a new flow control monitor. Instantaneous transfer rate is -// measured and updated for each sampleRate interval. windowSize determines the -// weight of each sample in the exponential moving average (EMA) calculation. -// The exact formulas are: -// -// sampleTime = currentTime - prevSampleTime -// sampleRate = byteCount / sampleTime -// weight = 1 - exp(-sampleTime/windowSize) -// newRate = weight*sampleRate + (1-weight)*oldRate -// -// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, -// respectively. -func New(sampleRate, windowSize time.Duration) *Monitor { - if sampleRate = clockRound(sampleRate); sampleRate <= 0 { - sampleRate = 5 * clockRate - } - if windowSize <= 0 { - windowSize = 1 * time.Second - } - now := clock() - return &Monitor{ - active: true, - start: now, - rWindow: windowSize.Seconds(), - sLast: now, - sRate: sampleRate, - tLast: now, - } -} - -// Update records the transfer of n bytes and returns n. It should be called -// after each Read/Write operation, even if n is 0. 
-func (m *Monitor) Update(n int) int { - m.mu.Lock() - m.update(n) - m.mu.Unlock() - return n -} - -// IO is a convenience method intended to wrap io.Reader and io.Writer method -// execution. It calls m.Update(n) and then returns (n, err) unmodified. -func (m *Monitor) IO(n int, err error) (int, error) { - return m.Update(n), err -} - -// Done marks the transfer as finished and prevents any further updates or -// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and -// Limit methods become NOOPs. It returns the total number of bytes transferred. -func (m *Monitor) Done() int64 { - m.mu.Lock() - if now := m.update(0); m.sBytes > 0 { - m.reset(now) - } - m.active = false - m.tLast = 0 - n := m.bytes - m.mu.Unlock() - return n -} - -// timeRemLimit is the maximum Status.TimeRem value. -const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second - -// Status represents the current Monitor status. All transfer rates are in bytes -// per second rounded to the nearest byte. -type Status struct { - Active bool // Flag indicating an active transfer - Start time.Time // Transfer start time - Duration time.Duration // Time period covered by the statistics - Idle time.Duration // Time since the last transfer of at least 1 byte - Bytes int64 // Total number of bytes transferred - Samples int64 // Total number of samples taken - InstRate int64 // Instantaneous transfer rate - CurRate int64 // Current transfer rate (EMA of InstRate) - AvgRate int64 // Average transfer rate (Bytes / Duration) - PeakRate int64 // Maximum instantaneous transfer rate - BytesRem int64 // Number of bytes remaining in the transfer - TimeRem time.Duration // Estimated time to completion - Progress Percent // Overall transfer progress -} - -// Status returns current transfer status information. The returned value -// becomes static after a call to Done. -func (m *Monitor) Status() Status { - m.mu.Lock() - now := m.update(0) - s := Status{ - Active: m.active, - Start: clockToTime(m.start), - Duration: m.sLast - m.start, - Idle: now - m.tLast, - Bytes: m.bytes, - Samples: m.samples, - PeakRate: round(m.rPeak), - BytesRem: m.tBytes - m.bytes, - Progress: percentOf(float64(m.bytes), float64(m.tBytes)), - } - if s.BytesRem < 0 { - s.BytesRem = 0 - } - if s.Duration > 0 { - rAvg := float64(s.Bytes) / s.Duration.Seconds() - s.AvgRate = round(rAvg) - if s.Active { - s.InstRate = round(m.rSample) - s.CurRate = round(m.rEMA) - if s.BytesRem > 0 { - if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { - ns := float64(s.BytesRem) / tRate * 1e9 - if ns > float64(timeRemLimit) { - ns = float64(timeRemLimit) - } - s.TimeRem = clockRound(time.Duration(ns)) - } - } - } - } - m.mu.Unlock() - return s -} - -// Limit restricts the instantaneous (per-sample) data flow to rate bytes per -// second. It returns the maximum number of bytes (0 <= n <= want) that may be -// transferred immediately without exceeding the limit. If block == true, the -// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, -// or the transfer is inactive (after a call to Done). -// -// At least one byte is always allowed to be transferred in any given sampling -// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate -// is 10 bytes per second. -// -// For usage examples, see the implementation of Reader and Writer in io.go. 
-func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { - if want < 1 || rate < 1 { - return want - } - m.mu.Lock() - - // Determine the maximum number of bytes that can be sent in one sample - limit := round(float64(rate) * m.sRate.Seconds()) - if limit <= 0 { - limit = 1 - } - - // If block == true, wait until m.sBytes < limit - if now := m.update(0); block { - for m.sBytes >= limit && m.active { - now = m.waitNextSample(now) - } - } - - // Make limit <= want (unlimited if the transfer is no longer active) - if limit -= m.sBytes; limit > int64(want) || !m.active { - limit = int64(want) - } - m.mu.Unlock() - - if limit < 0 { - limit = 0 - } - return int(limit) -} - -// SetTransferSize specifies the total size of the data transfer, which allows -// the Monitor to calculate the overall progress and time to completion. -func (m *Monitor) SetTransferSize(bytes int64) { - if bytes < 0 { - bytes = 0 - } - m.mu.Lock() - m.tBytes = bytes - m.mu.Unlock() -} - -// update accumulates the transferred byte count for the current sample until -// clock() - m.sLast >= m.sRate. The monitor status is updated once the current -// sample is done. -func (m *Monitor) update(n int) (now time.Duration) { - if !m.active { - return - } - if now = clock(); n > 0 { - m.tLast = now - } - m.sBytes += int64(n) - if sTime := now - m.sLast; sTime >= m.sRate { - t := sTime.Seconds() - if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { - m.rPeak = m.rSample - } - - // Exponential moving average using a method similar to *nix load - // average calculation. Longer sampling periods carry greater weight. - if m.samples > 0 { - w := math.Exp(-t / m.rWindow) - m.rEMA = m.rSample + w*(m.rEMA-m.rSample) - } else { - m.rEMA = m.rSample - } - m.reset(now) - } - return -} - -// reset clears the current sample state in preparation for the next sample. -func (m *Monitor) reset(sampleTime time.Duration) { - m.bytes += m.sBytes - m.samples++ - m.sBytes = 0 - m.sLast = sampleTime -} - -// waitNextSample sleeps for the remainder of the current sample. The lock is -// released and reacquired during the actual sleep period, so it's possible for -// the transfer to be inactive when this method returns. -func (m *Monitor) waitNextSample(now time.Duration) time.Duration { - const minWait = 5 * time.Millisecond - current := m.sLast - - // sleep until the last sample time changes (ideally, just one iteration) - for m.sLast == current && m.active { - d := current + m.sRate - now - m.mu.Unlock() - if d < minWait { - d = minWait - } - time.Sleep(d) - m.mu.Lock() - now = m.update(0) - } - return now -} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/io.go b/vendor/github.com/mxk/go-flowrate/flowrate/io.go deleted file mode 100644 index fbe0909725a..00000000000 --- a/vendor/github.com/mxk/go-flowrate/flowrate/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowrate: flow rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. 
-type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. -func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (r *Reader) SetLimit(new int64) (old int64) { - old, r.limit = r.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Read call on a non-blocking reader returns immediately if no additional bytes -// may be read at this time due to the rate limit. -func (r *Reader) SetBlocking(new bool) (old bool) { - old, r.block = r.block, new - return -} - -// Close closes the underlying reader if it implements the io.Closer interface. -func (r *Reader) Close() error { - defer r.Done() - if c, ok := r.Reader.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Writer implements io.WriteCloser with a restriction on the rate of data -// transfer. -type Writer struct { - io.Writer // Data destination - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be written due to the limit -} - -// NewWriter restricts all Write operations on w to limit bytes per second. The -// transfer rate and the default blocking behavior (true) can be changed -// directly on the returned *Writer. -func NewWriter(w io.Writer, limit int64) *Writer { - return &Writer{w, New(0, 0), limit, true} -} - -// Write writes len(p) bytes from p to the underlying data stream without -// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is -// non-blocking and no additional bytes can be written at this time. -func (w *Writer) Write(p []byte) (n int, err error) { - var c int - for len(p) > 0 && err == nil { - s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { - return n, ErrLimit - } - p = p[c:] - n += c - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (w *Writer) SetLimit(new int64) (old int64) { - old, w.limit = w.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Write call on a non-blocking writer returns as soon as no additional bytes -// may be written at this time due to the rate limit. -func (w *Writer) SetBlocking(new bool) (old bool) { - old, w.block = w.block, new - return -} - -// Close closes the underlying writer if it implements the io.Closer interface. 
-func (w *Writer) Close() error { - defer w.Done() - if c, ok := w.Writer.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/util.go b/vendor/github.com/mxk/go-flowrate/flowrate/util.go deleted file mode 100644 index 4caac583fc0..00000000000 --- a/vendor/github.com/mxk/go-flowrate/flowrate/util.go +++ /dev/null @@ -1,67 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "math" - "strconv" - "time" -) - -// clockRate is the resolution and precision of clock(). -const clockRate = 20 * time.Millisecond - -// czero is the process start time rounded down to the nearest clockRate -// increment. -var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate - -// clock returns a low resolution timestamp relative to the process start time. -func clock() time.Duration { - return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero -} - -// clockToTime converts a clock() timestamp to an absolute time.Time value. -func clockToTime(c time.Duration) time.Time { - return time.Unix(0, int64(czero+c)) -} - -// clockRound returns d rounded to the nearest clockRate increment. -func clockRound(d time.Duration) time.Duration { - return (d + clockRate>>1) / clockRate * clockRate -} - -// round returns x rounded to the nearest int64 (non-negative values only). -func round(x float64) int64 { - if _, frac := math.Modf(x); frac >= 0.5 { - return int64(math.Ceil(x)) - } - return int64(math.Floor(x)) -} - -// Percent represents a percentage in increments of 1/1000th of a percent. -type Percent uint32 - -// percentOf calculates what percent of the total is x. -func percentOf(x, total float64) Percent { - if x < 0 || total <= 0 { - return 0 - } else if p := round(x / total * 1e5); p <= math.MaxUint32 { - return Percent(p) - } - return Percent(math.MaxUint32) -} - -func (p Percent) Float() float64 { - return float64(p) * 1e-3 -} - -func (p Percent) String() string { - var buf [12]byte - b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) - n := len(b) - b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) - b[n] = '.' 
- return string(append(b, '%')) -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index bd6b17e1588..575456c8386 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -226,7 +226,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } type MatchCondition v1.MatchCondition diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 12c680dc972..c199702fbd0 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -242,7 +242,7 @@ type ValidatingAdmissionPolicySpec struct { // +listType=map // +listMapKey=name // +optional - Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` + Variables []Variable `json:"variables" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"` } // ParamKind is a tuple of Group Kind and Version. diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index f8997798892..4f0822440fa 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -229,8 +229,8 @@ message JobSpec { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional optional int32 backoffLimitPerIndex = 12; @@ -242,8 +242,8 @@ message JobSpec { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional optional int32 maxFailedIndexes = 13; @@ -326,8 +326,7 @@ message JobSpec { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. + // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. // +optional optional string podReplacementPolicy = 14; } @@ -376,8 +375,8 @@ message JobStatus { // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). // - // This field is beta-level. 
The job controller populates the field when - // the feature gate JobPodReplacementPolicy is enabled (enabled by default). + // This field is alpha-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (disabled by default). // +optional optional int32 terminating = 11; @@ -399,8 +398,8 @@ message JobStatus { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional optional string failedIndexes = 10; @@ -422,6 +421,9 @@ message JobStatus { optional UncountedTerminatedPods uncountedTerminatedPods = 8; // The number of pods which have a Ready condition. + // + // This field is beta-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (enabled by default). // +optional optional int32 ready = 9; } @@ -510,8 +512,8 @@ message PodFailurePolicyRule { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is beta-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 53fdf3c8d01..8a28614c0b4 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -124,7 +124,6 @@ const ( // This is an action which might be taken on a pod failure - mark the // Job's index as failed to avoid restarts within this index. This action // can only be used when backoffLimitPerIndex is set. - // This value is beta-level. PodFailurePolicyActionFailIndex PodFailurePolicyAction = "FailIndex" // This is an action which might be taken on a pod failure - the counter towards @@ -219,8 +218,8 @@ type PodFailurePolicyRule struct { // running pods are terminated. // - FailIndex: indicates that the pod's index is marked as Failed and will // not be restarted. - // This value is beta-level. It can be used when the - // `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default). + // This value is alpha-level. It can be used when the + // `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default). // - Ignore: indicates that the counter towards the .backoffLimit is not // incremented and a replacement pod is created. // - Count: indicates that the pod is handled in the default way - the @@ -304,8 +303,8 @@ type JobSpec struct { // batch.kubernetes.io/job-index-failure-count annotation. It can only // be set when Job's completionMode=Indexed, and the Pod's restart // policy is Never. The field is immutable. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. 
It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty" protobuf:"varint,12,opt,name=backoffLimitPerIndex"` @@ -317,8 +316,8 @@ type JobSpec struct { // It can only be specified when backoffLimitPerIndex is set. // It can be null or up to completions. It is required and must be // less than or equal to 10^4 when is completions greater than 10^5. - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty" protobuf:"varint,13,opt,name=maxFailedIndexes"` @@ -406,8 +405,7 @@ type JobSpec struct { // // When using podFailurePolicy, Failed is the the only allowed value. // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. - // This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. - // This is on by default. + // This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field. // +optional PodReplacementPolicy *PodReplacementPolicy `json:"podReplacementPolicy,omitempty" protobuf:"bytes,14,opt,name=podReplacementPolicy,casttype=podReplacementPolicy"` } @@ -456,8 +454,8 @@ type JobStatus struct { // The number of pods which are terminating (in phase Pending or Running // and have a deletionTimestamp). // - // This field is beta-level. The job controller populates the field when - // the feature gate JobPodReplacementPolicy is enabled (enabled by default). + // This field is alpha-level. The job controller populates the field when + // the feature gate JobPodReplacementPolicy is enabled (disabled by default). // +optional Terminating *int32 `json:"terminating,omitempty" protobuf:"varint,11,opt,name=terminating"` @@ -479,8 +477,8 @@ type JobStatus struct { // last element of the series, separated by a hyphen. // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are // represented as "1,3-5,7". - // This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` - // feature gate is enabled (enabled by default). + // This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` + // feature gate is enabled (disabled by default). // +optional FailedIndexes *string `json:"failedIndexes,omitempty" protobuf:"bytes,10,opt,name=failedIndexes"` @@ -502,6 +500,9 @@ type JobStatus struct { UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"` // The number of pods which have a Ready condition. + // + // This field is beta-level. The job controller populates the field when + // the feature gate JobReadyPods is enabled (enabled by default). // +optional Ready *int32 `json:"ready,omitempty" protobuf:"varint,9,opt,name=ready"` } @@ -534,25 +535,6 @@ const ( JobFailureTarget JobConditionType = "FailureTarget" ) -const ( - // JobReasonPodFailurePolicy reason indicates a job failure condition is added due to - // a failed pod matching a pod failure policy rule - // https://kep.k8s.io/3329 - // This is currently a beta field. 
- JobReasonPodFailurePolicy string = "PodFailurePolicy" - // JobReasonBackOffLimitExceeded reason indicates that pods within a job have failed a number of - // times higher than backOffLimit times. - JobReasonBackoffLimitExceeded string = "BackoffLimitExceeded" - // JobReasponDeadlineExceeded means job duration is past ActiveDeadline - JobReasonDeadlineExceeded string = "DeadlineExceeded" - // JobReasonMaxFailedIndexesExceeded indicates that an indexed of a job failed - // This const is used in beta-level feature: https://kep.k8s.io/3850. - JobReasonMaxFailedIndexesExceeded string = "MaxFailedIndexesExceeded" - // JobReasonFailedIndexes means Job has failed indexes. - // This const is used in beta-level feature: https://kep.k8s.io/3850. - JobReasonFailedIndexes string = "FailedIndexes" -) - // JobCondition describes current state of a job. type JobCondition struct { // Type of job condition, Complete or Failed. diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go index 980f1e47505..43b4e1e7d94 100644 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -117,15 +117,15 @@ var map_JobSpec = map[string]string{ "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", - "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", - "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. 
It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", + "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", "template": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", "completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. 
When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", "suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", - "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is an alpha field. Enable JobPodReplacementPolicy to be able to use this field.", } func (JobSpec) SwaggerDoc() map[string]string { @@ -140,11 +140,11 @@ var map_JobStatus = map[string]string{ "active": "The number of pending and running pods.", "succeeded": "The number of pods which reached phase Succeeded.", "failed": "The number of pods which reached phase Failed.", - "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "terminating": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is alpha-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (disabled by default).", "completedIndexes": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. 
For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", - "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "failedIndexes": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in the text format analogous as for the `completedIndexes` field, ie. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is alpha-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).", "uncountedTerminatedPods": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", - "ready": "The number of pods which have a Ready condition.", + "ready": "The number of pods which have a Ready condition.\n\nThis field is beta-level. The job controller populates the field when the feature gate JobReadyPods is enabled (enabled by default).", } func (JobStatus) SwaggerDoc() map[string]string { @@ -193,7 +193,7 @@ func (PodFailurePolicyOnPodConditionsPattern) SwaggerDoc() map[string]string { var map_PodFailurePolicyRule = map[string]string{ "": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", - "action": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", + "action": "Specifies the action taken on a pod failure when the requirements are satisfied. 
Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is alpha-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (disabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future. Clients should react to an unknown action by skipping the rule.", "onExitCodes": "Represents the requirement on the container exit codes.", "onPodConditions": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", } diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 04c7939e0d5..c267a5febde 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -525,38 +525,10 @@ func (m *ClientIPConfig) XXX_DiscardUnknown() { var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo -func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} } -func (*ClusterTrustBundleProjection) ProtoMessage() {} -func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{17} -} -func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src) -} -func (m *ClusterTrustBundleProjection) XXX_Size() int { - return m.Size() -} -func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo - func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} + return fileDescriptor_83c10c24ec417dc9, []int{17} } func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -584,7 +556,7 @@ var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} + return fileDescriptor_83c10c24ec417dc9, []int{18} } func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -612,7 +584,7 @@ var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} + return 
fileDescriptor_83c10c24ec417dc9, []int{19} } func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -640,7 +612,7 @@ var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} + return fileDescriptor_83c10c24ec417dc9, []int{20} } func (m *ConfigMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -668,7 +640,7 @@ var xxx_messageInfo_ConfigMap proto.InternalMessageInfo func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} + return fileDescriptor_83c10c24ec417dc9, []int{21} } func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -696,7 +668,7 @@ var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} + return fileDescriptor_83c10c24ec417dc9, []int{22} } func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +696,7 @@ var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} + return fileDescriptor_83c10c24ec417dc9, []int{23} } func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -752,7 +724,7 @@ var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } func (*ConfigMapNodeConfigSource) ProtoMessage() {} func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} + return fileDescriptor_83c10c24ec417dc9, []int{24} } func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +752,7 @@ var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} + return fileDescriptor_83c10c24ec417dc9, []int{25} } func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +780,7 @@ var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} + return fileDescriptor_83c10c24ec417dc9, []int{26} } func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -836,7 +808,7 @@ var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{28} + return fileDescriptor_83c10c24ec417dc9, []int{27} 
} func (m *Container) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +836,7 @@ var xxx_messageInfo_Container proto.InternalMessageInfo func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} + return fileDescriptor_83c10c24ec417dc9, []int{28} } func (m *ContainerImage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +864,7 @@ var xxx_messageInfo_ContainerImage proto.InternalMessageInfo func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} func (*ContainerPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} + return fileDescriptor_83c10c24ec417dc9, []int{29} } func (m *ContainerPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -920,7 +892,7 @@ var xxx_messageInfo_ContainerPort proto.InternalMessageInfo func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} } func (*ContainerResizePolicy) ProtoMessage() {} func (*ContainerResizePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} + return fileDescriptor_83c10c24ec417dc9, []int{30} } func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -948,7 +920,7 @@ var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} + return fileDescriptor_83c10c24ec417dc9, []int{31} } func (m *ContainerState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +948,7 @@ var xxx_messageInfo_ContainerState proto.InternalMessageInfo func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} + return fileDescriptor_83c10c24ec417dc9, []int{32} } func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1004,7 +976,7 @@ var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} + return fileDescriptor_83c10c24ec417dc9, []int{33} } func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1004,7 @@ var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} + return fileDescriptor_83c10c24ec417dc9, []int{34} } func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1060,7 +1032,7 @@ var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} + return fileDescriptor_83c10c24ec417dc9, []int{35} } func (m 
*ContainerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1088,7 +1060,7 @@ var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} + return fileDescriptor_83c10c24ec417dc9, []int{36} } func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1088,7 @@ var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} + return fileDescriptor_83c10c24ec417dc9, []int{37} } func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1144,7 +1116,7 @@ var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} + return fileDescriptor_83c10c24ec417dc9, []int{38} } func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1172,7 +1144,7 @@ var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} + return fileDescriptor_83c10c24ec417dc9, []int{39} } func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1200,7 +1172,7 @@ var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} + return fileDescriptor_83c10c24ec417dc9, []int{40} } func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,7 +1200,7 @@ var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} func (*EndpointAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} + return fileDescriptor_83c10c24ec417dc9, []int{41} } func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1256,7 +1228,7 @@ var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} + return fileDescriptor_83c10c24ec417dc9, []int{42} } func (m *EndpointPort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1284,7 +1256,7 @@ var xxx_messageInfo_EndpointPort proto.InternalMessageInfo func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} + return fileDescriptor_83c10c24ec417dc9, []int{43} } func (m 
*EndpointSubset) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1312,7 +1284,7 @@ var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} + return fileDescriptor_83c10c24ec417dc9, []int{44} } func (m *Endpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1340,7 +1312,7 @@ var xxx_messageInfo_Endpoints proto.InternalMessageInfo func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} + return fileDescriptor_83c10c24ec417dc9, []int{45} } func (m *EndpointsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1340,7 @@ var xxx_messageInfo_EndpointsList proto.InternalMessageInfo func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} + return fileDescriptor_83c10c24ec417dc9, []int{46} } func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1396,7 +1368,7 @@ var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} + return fileDescriptor_83c10c24ec417dc9, []int{47} } func (m *EnvVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1424,7 +1396,7 @@ var xxx_messageInfo_EnvVar proto.InternalMessageInfo func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} + return fileDescriptor_83c10c24ec417dc9, []int{48} } func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1452,7 +1424,7 @@ var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } func (*EphemeralContainer) ProtoMessage() {} func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} + return fileDescriptor_83c10c24ec417dc9, []int{49} } func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1480,7 +1452,7 @@ var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } func (*EphemeralContainerCommon) ProtoMessage() {} func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} + return fileDescriptor_83c10c24ec417dc9, []int{50} } func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1508,7 +1480,7 @@ var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} } func (*EphemeralVolumeSource) ProtoMessage() {} func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} + return fileDescriptor_83c10c24ec417dc9, []int{51} } func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1536,7 +1508,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo 
func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} + return fileDescriptor_83c10c24ec417dc9, []int{52} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1564,7 +1536,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} + return fileDescriptor_83c10c24ec417dc9, []int{53} } func (m *EventList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1592,7 +1564,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo func (m *EventSeries) Reset() { *m = EventSeries{} } func (*EventSeries) ProtoMessage() {} func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} + return fileDescriptor_83c10c24ec417dc9, []int{54} } func (m *EventSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1620,7 +1592,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} + return fileDescriptor_83c10c24ec417dc9, []int{55} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1648,7 +1620,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} + return fileDescriptor_83c10c24ec417dc9, []int{56} } func (m *ExecAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1676,7 +1648,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} + return fileDescriptor_83c10c24ec417dc9, []int{57} } func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1704,7 +1676,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } func (*FlexPersistentVolumeSource) ProtoMessage() {} func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} + return fileDescriptor_83c10c24ec417dc9, []int{58} } func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1732,7 +1704,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} + return fileDescriptor_83c10c24ec417dc9, []int{59} } func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1760,7 +1732,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} + return 
fileDescriptor_83c10c24ec417dc9, []int{60} } func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1788,7 +1760,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} + return fileDescriptor_83c10c24ec417dc9, []int{61} } func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1816,7 +1788,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo func (m *GRPCAction) Reset() { *m = GRPCAction{} } func (*GRPCAction) ProtoMessage() {} func (*GRPCAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} + return fileDescriptor_83c10c24ec417dc9, []int{62} } func (m *GRPCAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1816,7 @@ var xxx_messageInfo_GRPCAction proto.InternalMessageInfo func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} + return fileDescriptor_83c10c24ec417dc9, []int{63} } func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1872,7 +1844,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} + return fileDescriptor_83c10c24ec417dc9, []int{64} } func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1900,7 +1872,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} + return fileDescriptor_83c10c24ec417dc9, []int{65} } func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1928,7 +1900,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} + return fileDescriptor_83c10c24ec417dc9, []int{66} } func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1956,7 +1928,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} + return fileDescriptor_83c10c24ec417dc9, []int{67} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1984,7 +1956,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} func (*HostAlias) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{69} + return fileDescriptor_83c10c24ec417dc9, []int{68} } func (m *HostAlias) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2012,7 +1984,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo func (m *HostIP) Reset() { *m = HostIP{} } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} + return fileDescriptor_83c10c24ec417dc9, []int{69} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2040,7 +2012,7 @@ var xxx_messageInfo_HostIP proto.InternalMessageInfo func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} + return fileDescriptor_83c10c24ec417dc9, []int{70} } func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2068,7 +2040,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } func (*ISCSIPersistentVolumeSource) ProtoMessage() {} func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} + return fileDescriptor_83c10c24ec417dc9, []int{71} } func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2096,7 +2068,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} + return fileDescriptor_83c10c24ec417dc9, []int{72} } func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2124,7 +2096,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} + return fileDescriptor_83c10c24ec417dc9, []int{73} } func (m *KeyToPath) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2152,7 +2124,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} + return fileDescriptor_83c10c24ec417dc9, []int{74} } func (m *Lifecycle) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2180,7 +2152,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} } func (*LifecycleHandler) ProtoMessage() {} func (*LifecycleHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} + return fileDescriptor_83c10c24ec417dc9, []int{75} } func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2208,7 +2180,7 @@ var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} + return fileDescriptor_83c10c24ec417dc9, []int{76} } func (m *LimitRange) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -2236,7 +2208,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} + return fileDescriptor_83c10c24ec417dc9, []int{77} } func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2264,7 +2236,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} + return fileDescriptor_83c10c24ec417dc9, []int{78} } func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2292,7 +2264,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} + return fileDescriptor_83c10c24ec417dc9, []int{79} } func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2320,7 +2292,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} + return fileDescriptor_83c10c24ec417dc9, []int{80} } func (m *List) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2348,7 +2320,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} + return fileDescriptor_83c10c24ec417dc9, []int{81} } func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2348,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} + return fileDescriptor_83c10c24ec417dc9, []int{82} } func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2404,7 +2376,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} + return fileDescriptor_83c10c24ec417dc9, []int{83} } func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2404,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} + return fileDescriptor_83c10c24ec417dc9, []int{84} } func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2457,38 +2429,10 @@ func (m *LocalVolumeSource) XXX_DiscardUnknown() { var xxx_messageInfo_LocalVolumeSource 
proto.InternalMessageInfo
 
-func (m *ModifyVolumeStatus) Reset()      { *m = ModifyVolumeStatus{} }
-func (*ModifyVolumeStatus) ProtoMessage() {}
-func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{86}
-}
-func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ModifyVolumeStatus.Merge(m, src)
-}
-func (m *ModifyVolumeStatus) XXX_Size() int {
-	return m.Size()
-}
-func (m *ModifyVolumeStatus) XXX_DiscardUnknown() {
-	xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
-
 func (m *NFSVolumeSource) Reset()      { *m = NFSVolumeSource{} }
 func (*NFSVolumeSource) ProtoMessage() {}
 func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{87}
+	return fileDescriptor_83c10c24ec417dc9, []int{85}
 }
 func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2516,7 +2460,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
 func (m *Namespace) Reset()      { *m = Namespace{} }
 func (*Namespace) ProtoMessage() {}
 func (*Namespace) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{88}
+	return fileDescriptor_83c10c24ec417dc9, []int{86}
 }
 func (m *Namespace) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2544,7 +2488,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
 func (m *NamespaceCondition) Reset()      { *m = NamespaceCondition{} }
 func (*NamespaceCondition) ProtoMessage() {}
 func (*NamespaceCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{89}
+	return fileDescriptor_83c10c24ec417dc9, []int{87}
 }
 func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2572,7 +2516,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
 func (m *NamespaceList) Reset()      { *m = NamespaceList{} }
 func (*NamespaceList) ProtoMessage() {}
 func (*NamespaceList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{90}
+	return fileDescriptor_83c10c24ec417dc9, []int{88}
 }
 func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2600,7 +2544,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
 func (m *NamespaceSpec) Reset()      { *m = NamespaceSpec{} }
 func (*NamespaceSpec) ProtoMessage() {}
 func (*NamespaceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{91}
+	return fileDescriptor_83c10c24ec417dc9, []int{89}
 }
 func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2628,7 +2572,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
 func (m *NamespaceStatus) Reset()      { *m = NamespaceStatus{} }
 func (*NamespaceStatus) ProtoMessage() {}
 func (*NamespaceStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_83c10c24ec417dc9, []int{92}
+	return fileDescriptor_83c10c24ec417dc9, []int{90}
 }
 func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2656,7 +2600,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
 func (m *Node) Reset()      { *m = Node{} }
 func (*Node) ProtoMessage() {}
 func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{93} + return fileDescriptor_83c10c24ec417dc9, []int{91} } func (m *Node) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2684,7 +2628,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} + return fileDescriptor_83c10c24ec417dc9, []int{92} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2712,7 +2656,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} + return fileDescriptor_83c10c24ec417dc9, []int{93} } func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2740,7 +2684,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} + return fileDescriptor_83c10c24ec417dc9, []int{94} } func (m *NodeCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2768,7 +2712,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} + return fileDescriptor_83c10c24ec417dc9, []int{95} } func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2796,7 +2740,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } func (*NodeConfigStatus) ProtoMessage() {} func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} + return fileDescriptor_83c10c24ec417dc9, []int{96} } func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2824,7 +2768,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} + return fileDescriptor_83c10c24ec417dc9, []int{97} } func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2852,7 +2796,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} + return fileDescriptor_83c10c24ec417dc9, []int{98} } func (m *NodeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2880,7 +2824,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} + return fileDescriptor_83c10c24ec417dc9, []int{99} } func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2908,7 +2852,7 
@@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} + return fileDescriptor_83c10c24ec417dc9, []int{100} } func (m *NodeResources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2936,7 +2880,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} + return fileDescriptor_83c10c24ec417dc9, []int{101} } func (m *NodeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2964,7 +2908,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} + return fileDescriptor_83c10c24ec417dc9, []int{102} } func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2992,7 +2936,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} + return fileDescriptor_83c10c24ec417dc9, []int{103} } func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3020,7 +2964,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} + return fileDescriptor_83c10c24ec417dc9, []int{104} } func (m *NodeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3048,7 +2992,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} + return fileDescriptor_83c10c24ec417dc9, []int{105} } func (m *NodeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3076,7 +3020,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} + return fileDescriptor_83c10c24ec417dc9, []int{106} } func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3104,7 +3048,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} + return fileDescriptor_83c10c24ec417dc9, []int{107} } func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3132,7 +3076,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo func (m *ObjectReference) Reset() { *m = ObjectReference{} } func 
(*ObjectReference) ProtoMessage() {} func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} + return fileDescriptor_83c10c24ec417dc9, []int{108} } func (m *ObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3160,7 +3104,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} + return fileDescriptor_83c10c24ec417dc9, []int{109} } func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3188,7 +3132,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} + return fileDescriptor_83c10c24ec417dc9, []int{110} } func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3216,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} + return fileDescriptor_83c10c24ec417dc9, []int{111} } func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3244,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{114} + return fileDescriptor_83c10c24ec417dc9, []int{112} } func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3272,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{115} + return fileDescriptor_83c10c24ec417dc9, []int{113} } func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3300,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{116} + return fileDescriptor_83c10c24ec417dc9, []int{114} } func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3328,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} } func (*PersistentVolumeClaimTemplate) ProtoMessage() {} func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{117} + return 
fileDescriptor_83c10c24ec417dc9, []int{115} } func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3356,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{118} + return fileDescriptor_83c10c24ec417dc9, []int{116} } func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3384,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{119} + return fileDescriptor_83c10c24ec417dc9, []int{117} } func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3412,7 +3356,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{120} + return fileDescriptor_83c10c24ec417dc9, []int{118} } func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3440,7 +3384,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{121} + return fileDescriptor_83c10c24ec417dc9, []int{119} } func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3468,7 +3412,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{122} + return fileDescriptor_83c10c24ec417dc9, []int{120} } func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3496,7 +3440,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{123} + return fileDescriptor_83c10c24ec417dc9, []int{121} } func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3524,7 +3468,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} func (*Pod) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{124} + return fileDescriptor_83c10c24ec417dc9, []int{122} } func (m *Pod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3552,7 +3496,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo func (m *PodAffinity) 
Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} func (*PodAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{125} + return fileDescriptor_83c10c24ec417dc9, []int{123} } func (m *PodAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3580,7 +3524,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} func (*PodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{126} + return fileDescriptor_83c10c24ec417dc9, []int{124} } func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3608,7 +3552,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} func (*PodAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{127} + return fileDescriptor_83c10c24ec417dc9, []int{125} } func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3636,7 +3580,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} func (*PodAttachOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{128} + return fileDescriptor_83c10c24ec417dc9, []int{126} } func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3664,7 +3608,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} func (*PodCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{129} + return fileDescriptor_83c10c24ec417dc9, []int{127} } func (m *PodCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3692,7 +3636,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } func (*PodDNSConfig) ProtoMessage() {} func (*PodDNSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{130} + return fileDescriptor_83c10c24ec417dc9, []int{128} } func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3720,7 +3664,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } func (*PodDNSConfigOption) ProtoMessage() {} func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{131} + return fileDescriptor_83c10c24ec417dc9, []int{129} } func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3748,7 +3692,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} func (*PodExecOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{132} + return fileDescriptor_83c10c24ec417dc9, []int{130} } func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3776,7 +3720,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo func (m *PodIP) Reset() { *m = PodIP{} } func (*PodIP) ProtoMessage() {} func (*PodIP) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{133} + 
return fileDescriptor_83c10c24ec417dc9, []int{131} } func (m *PodIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3804,7 +3748,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} func (*PodList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{134} + return fileDescriptor_83c10c24ec417dc9, []int{132} } func (m *PodList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3832,7 +3776,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} func (*PodLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{135} + return fileDescriptor_83c10c24ec417dc9, []int{133} } func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3860,7 +3804,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo func (m *PodOS) Reset() { *m = PodOS{} } func (*PodOS) ProtoMessage() {} func (*PodOS) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{136} + return fileDescriptor_83c10c24ec417dc9, []int{134} } func (m *PodOS) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3888,7 +3832,7 @@ var xxx_messageInfo_PodOS proto.InternalMessageInfo func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{137} + return fileDescriptor_83c10c24ec417dc9, []int{135} } func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3916,7 +3860,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} func (*PodProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{138} + return fileDescriptor_83c10c24ec417dc9, []int{136} } func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3944,7 +3888,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } func (*PodReadinessGate) ProtoMessage() {} func (*PodReadinessGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{139} + return fileDescriptor_83c10c24ec417dc9, []int{137} } func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3972,7 +3916,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} } func (*PodResourceClaim) ProtoMessage() {} func (*PodResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{140} + return fileDescriptor_83c10c24ec417dc9, []int{138} } func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4000,7 +3944,7 @@ var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} } func (*PodResourceClaimStatus) ProtoMessage() {} func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{141} + return fileDescriptor_83c10c24ec417dc9, []int{139} } func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4028,7 +3972,7 @@ var 
xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} } func (*PodSchedulingGate) ProtoMessage() {} func (*PodSchedulingGate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{142} + return fileDescriptor_83c10c24ec417dc9, []int{140} } func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4056,7 +4000,7 @@ var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} func (*PodSecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{143} + return fileDescriptor_83c10c24ec417dc9, []int{141} } func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4084,7 +4028,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} func (*PodSignature) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{144} + return fileDescriptor_83c10c24ec417dc9, []int{142} } func (m *PodSignature) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4112,7 +4056,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{145} + return fileDescriptor_83c10c24ec417dc9, []int{143} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4140,7 +4084,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{146} + return fileDescriptor_83c10c24ec417dc9, []int{144} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4168,7 +4112,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} func (*PodStatusResult) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{147} + return fileDescriptor_83c10c24ec417dc9, []int{145} } func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4196,7 +4140,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} func (*PodTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{148} + return fileDescriptor_83c10c24ec417dc9, []int{146} } func (m *PodTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4224,7 +4168,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} func (*PodTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{149} + return fileDescriptor_83c10c24ec417dc9, []int{147} } func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4252,7 +4196,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} func (*PodTemplateSpec) Descriptor() 
([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{150} + return fileDescriptor_83c10c24ec417dc9, []int{148} } func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4280,7 +4224,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{151} + return fileDescriptor_83c10c24ec417dc9, []int{149} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4308,7 +4252,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{152} + return fileDescriptor_83c10c24ec417dc9, []int{150} } func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4336,7 +4280,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{153} + return fileDescriptor_83c10c24ec417dc9, []int{151} } func (m *Preconditions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4364,7 +4308,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{154} + return fileDescriptor_83c10c24ec417dc9, []int{152} } func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4392,7 +4336,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{155} + return fileDescriptor_83c10c24ec417dc9, []int{153} } func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4420,7 +4364,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} func (*Probe) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{156} + return fileDescriptor_83c10c24ec417dc9, []int{154} } func (m *Probe) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4448,7 +4392,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo func (m *ProbeHandler) Reset() { *m = ProbeHandler{} } func (*ProbeHandler) ProtoMessage() {} func (*ProbeHandler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{157} + return fileDescriptor_83c10c24ec417dc9, []int{155} } func (m *ProbeHandler) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,7 +4420,7 @@ var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{158} + return 
fileDescriptor_83c10c24ec417dc9, []int{156} } func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4504,7 +4448,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{159} + return fileDescriptor_83c10c24ec417dc9, []int{157} } func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4532,7 +4476,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{160} + return fileDescriptor_83c10c24ec417dc9, []int{158} } func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4560,7 +4504,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} func (*RBDVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{161} + return fileDescriptor_83c10c24ec417dc9, []int{159} } func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4588,7 +4532,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} func (*RangeAllocation) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{162} + return fileDescriptor_83c10c24ec417dc9, []int{160} } func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4616,7 +4560,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} func (*ReplicationController) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{163} + return fileDescriptor_83c10c24ec417dc9, []int{161} } func (m *ReplicationController) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4644,7 +4588,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{164} + return fileDescriptor_83c10c24ec417dc9, []int{162} } func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4672,7 +4616,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{165} + return fileDescriptor_83c10c24ec417dc9, []int{163} } func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4700,7 +4644,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo func (m *ReplicationControllerSpec) Reset() { *m = 
ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} + return fileDescriptor_83c10c24ec417dc9, []int{164} } func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4728,7 +4672,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} + return fileDescriptor_83c10c24ec417dc9, []int{165} } func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4756,7 +4700,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} + return fileDescriptor_83c10c24ec417dc9, []int{166} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4784,7 +4728,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} + return fileDescriptor_83c10c24ec417dc9, []int{167} } func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4812,7 +4756,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} func (*ResourceQuota) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} + return fileDescriptor_83c10c24ec417dc9, []int{168} } func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4840,7 +4784,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} func (*ResourceQuotaList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} + return fileDescriptor_83c10c24ec417dc9, []int{169} } func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4868,7 +4812,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } func (*ResourceQuotaSpec) ProtoMessage() {} func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} + return fileDescriptor_83c10c24ec417dc9, []int{170} } func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4896,7 +4840,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} + return fileDescriptor_83c10c24ec417dc9, []int{171} } func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4924,7 +4868,7 @@ var xxx_messageInfo_ResourceQuotaStatus 
proto.InternalMessageInfo func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} func (*ResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} + return fileDescriptor_83c10c24ec417dc9, []int{172} } func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4952,7 +4896,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} func (*SELinuxOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} + return fileDescriptor_83c10c24ec417dc9, []int{173} } func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4980,7 +4924,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} + return fileDescriptor_83c10c24ec417dc9, []int{174} } func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5008,7 +4952,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} + return fileDescriptor_83c10c24ec417dc9, []int{175} } func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5036,7 +4980,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } func (*ScopeSelector) ProtoMessage() {} func (*ScopeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} + return fileDescriptor_83c10c24ec417dc9, []int{176} } func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5064,7 +5008,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } func (*ScopedResourceSelectorRequirement) ProtoMessage() {} func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} + return fileDescriptor_83c10c24ec417dc9, []int{177} } func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5092,7 +5036,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo func (m *SeccompProfile) Reset() { *m = SeccompProfile{} } func (*SeccompProfile) ProtoMessage() {} func (*SeccompProfile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} + return fileDescriptor_83c10c24ec417dc9, []int{178} } func (m *SeccompProfile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5120,7 +5064,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} + return fileDescriptor_83c10c24ec417dc9, []int{179} } func (m *Secret) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -5148,7 +5092,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} + return fileDescriptor_83c10c24ec417dc9, []int{180} } func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5176,7 +5120,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} + return fileDescriptor_83c10c24ec417dc9, []int{181} } func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5204,7 +5148,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} + return fileDescriptor_83c10c24ec417dc9, []int{182} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5232,7 +5176,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} + return fileDescriptor_83c10c24ec417dc9, []int{183} } func (m *SecretProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5260,7 +5204,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} + return fileDescriptor_83c10c24ec417dc9, []int{184} } func (m *SecretReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5288,7 +5232,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} + return fileDescriptor_83c10c24ec417dc9, []int{185} } func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5316,7 +5260,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} + return fileDescriptor_83c10c24ec417dc9, []int{186} } func (m *SecurityContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5344,7 +5288,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} + return fileDescriptor_83c10c24ec417dc9, []int{187} } func (m *SerializedReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5372,7 +5316,7 @@ var xxx_messageInfo_SerializedReference 
proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} + return fileDescriptor_83c10c24ec417dc9, []int{188} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5400,7 +5344,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} + return fileDescriptor_83c10c24ec417dc9, []int{189} } func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5428,7 +5372,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} + return fileDescriptor_83c10c24ec417dc9, []int{190} } func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5456,7 +5400,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } func (*ServiceAccountTokenProjection) ProtoMessage() {} func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} + return fileDescriptor_83c10c24ec417dc9, []int{191} } func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5484,7 +5428,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} + return fileDescriptor_83c10c24ec417dc9, []int{192} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5512,7 +5456,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} + return fileDescriptor_83c10c24ec417dc9, []int{193} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5540,7 +5484,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} + return fileDescriptor_83c10c24ec417dc9, []int{194} } func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5568,7 +5512,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} + return fileDescriptor_83c10c24ec417dc9, []int{195} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5596,7 +5540,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) 
ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} + return fileDescriptor_83c10c24ec417dc9, []int{196} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5624,7 +5568,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} + return fileDescriptor_83c10c24ec417dc9, []int{197} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5649,38 +5593,10 @@ func (m *SessionAffinityConfig) XXX_DiscardUnknown() { var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo -func (m *SleepAction) Reset() { *m = SleepAction{} } -func (*SleepAction) ProtoMessage() {} -func (*SleepAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} -} -func (m *SleepAction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SleepAction) XXX_Merge(src proto.Message) { - xxx_messageInfo_SleepAction.Merge(m, src) -} -func (m *SleepAction) XXX_Size() int { - return m.Size() -} -func (m *SleepAction) XXX_DiscardUnknown() { - xxx_messageInfo_SleepAction.DiscardUnknown(m) -} - -var xxx_messageInfo_SleepAction proto.InternalMessageInfo - func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{201} + return fileDescriptor_83c10c24ec417dc9, []int{198} } func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5708,7 +5624,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{202} + return fileDescriptor_83c10c24ec417dc9, []int{199} } func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5736,7 +5652,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{203} + return fileDescriptor_83c10c24ec417dc9, []int{200} } func (m *Sysctl) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5764,7 +5680,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{204} + return fileDescriptor_83c10c24ec417dc9, []int{201} } func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5792,7 +5708,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{205} + return fileDescriptor_83c10c24ec417dc9, []int{202} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5820,7 +5736,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{206} + return fileDescriptor_83c10c24ec417dc9, []int{203} } func (m *Toleration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5848,7 +5764,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } func (*TopologySelectorLabelRequirement) ProtoMessage() {} func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{207} + return fileDescriptor_83c10c24ec417dc9, []int{204} } func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5876,7 +5792,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } func (*TopologySelectorTerm) ProtoMessage() {} func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{208} + return fileDescriptor_83c10c24ec417dc9, []int{205} } func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5904,7 +5820,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } func (*TopologySpreadConstraint) ProtoMessage() {} func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{209} + return fileDescriptor_83c10c24ec417dc9, []int{206} } func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5932,7 +5848,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{210} + return fileDescriptor_83c10c24ec417dc9, []int{207} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5960,7 +5876,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} } func (*TypedObjectReference) ProtoMessage() {} func (*TypedObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{211} + return fileDescriptor_83c10c24ec417dc9, []int{208} } func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -5988,7 +5904,7 @@ var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{212} + return fileDescriptor_83c10c24ec417dc9, []int{209} } func (m *Volume) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6016,7 +5932,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } func 
(*VolumeDevice) ProtoMessage() {} func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{213} + return fileDescriptor_83c10c24ec417dc9, []int{210} } func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6044,7 +5960,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{214} + return fileDescriptor_83c10c24ec417dc9, []int{211} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6072,7 +5988,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } func (*VolumeNodeAffinity) ProtoMessage() {} func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{215} + return fileDescriptor_83c10c24ec417dc9, []int{212} } func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6100,7 +6016,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{216} + return fileDescriptor_83c10c24ec417dc9, []int{213} } func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6125,38 +6041,10 @@ func (m *VolumeProjection) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo -func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} } -func (*VolumeResourceRequirements) ProtoMessage() {} -func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{217} -} -func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeResourceRequirements.Merge(m, src) -} -func (m *VolumeResourceRequirements) XXX_Size() int { - return m.Size() -} -func (m *VolumeResourceRequirements) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo - func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{218} + return fileDescriptor_83c10c24ec417dc9, []int{214} } func (m *VolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6184,7 +6072,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{219} + return fileDescriptor_83c10c24ec417dc9, []int{215} } func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6212,7 +6100,7 @@ var 
xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{220} + return fileDescriptor_83c10c24ec417dc9, []int{216} } func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6240,7 +6128,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } func (*WindowsSecurityContextOptions) ProtoMessage() {} func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{221} + return fileDescriptor_83c10c24ec417dc9, []int{217} } func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -6285,7 +6173,6 @@ func init() { proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") proto.RegisterType((*ClaimSource)(nil), "k8s.io.api.core.v1.ClaimSource") proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") - proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection") proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") @@ -6364,7 +6251,6 @@ func init() { proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") - proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus") proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") @@ -6496,7 +6382,6 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") - proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction") proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") @@ -6513,9 +6398,6 @@ func init() { proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") - proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements") - proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry") - proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry") proto.RegisterType((*VolumeSource)(nil), 
"k8s.io.api.core.v1.VolumeSource") proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") @@ -6527,974 +6409,934 @@ func init() { } var fileDescriptor_83c10c24ec417dc9 = []byte{ - // 15465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x1c, 0xc9, - 0x75, 0x18, 0xcc, 0xea, 0x9e, 0xab, 0xdf, 0xdc, 0x39, 0x00, 0x76, 0x30, 0x0b, 0xa0, 0xb1, 0xb5, - 0xbb, 0x58, 0xec, 0x35, 0x20, 0xf6, 0x20, 0x97, 0xbb, 0xcb, 0x15, 0xe7, 0x04, 0x66, 0x31, 0x33, - 0xe8, 0xcd, 0x1e, 0x00, 0xe4, 0x72, 0xc9, 0x8f, 0x85, 0xee, 0x9c, 0x99, 0xe2, 0x74, 0x57, 0xf5, - 0x56, 0x55, 0x0f, 0x30, 0xf8, 0xc8, 0x90, 0x44, 0x7d, 0xa2, 0x44, 0x4a, 0x5f, 0x04, 0xe3, 0x0b, - 0x7d, 0x47, 0x50, 0x0a, 0xc5, 0x17, 0x92, 0xac, 0xc3, 0xb4, 0x64, 0xd3, 0x94, 0x25, 0x59, 0xd4, - 0xe5, 0x2b, 0x2c, 0x29, 0x1c, 0xb2, 0xac, 0x08, 0x8b, 0x0a, 0x2b, 0x3c, 0x32, 0x21, 0x47, 0x28, - 0xf4, 0xc3, 0x92, 0x7c, 0xfc, 0xb0, 0x61, 0xd9, 0x72, 0xe4, 0x59, 0x99, 0x75, 0x74, 0xf7, 0x60, - 0x07, 0xc3, 0x25, 0x63, 0xff, 0x75, 0xbf, 0xf7, 0xf2, 0x65, 0x56, 0x9e, 0x2f, 0xdf, 0x7b, 0xf9, - 0x1e, 0xbc, 0xb2, 0xf3, 0x52, 0x38, 0xeb, 0xfa, 0x17, 0x76, 0xda, 0x37, 0x49, 0xe0, 0x91, 0x88, - 0x84, 0x17, 0x76, 0x89, 0x57, 0xf7, 0x83, 0x0b, 0x02, 0xe1, 0xb4, 0xdc, 0x0b, 0x35, 0x3f, 0x20, - 0x17, 0x76, 0x2f, 0x5e, 0xd8, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0xd4, 0x67, 0x5b, 0x81, 0x1f, 0xf9, - 0x08, 0x71, 0x9a, 0x59, 0xa7, 0xe5, 0xce, 0x52, 0x9a, 0xd9, 0xdd, 0x8b, 0x33, 0xcf, 0x6e, 0xb9, - 0xd1, 0x76, 0xfb, 0xe6, 0x6c, 0xcd, 0x6f, 0x5e, 0xd8, 0xf2, 0xb7, 0xfc, 0x0b, 0x8c, 0xf4, 0x66, - 0x7b, 0x93, 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0xce, 0x62, 0xe6, 0x85, 0xb8, 0x9a, 0xa6, 0x53, 0xdb, - 0x76, 0x3d, 0x12, 0xec, 0x5d, 0x68, 0xed, 0x6c, 0xb1, 0x7a, 0x03, 0x12, 0xfa, 0xed, 0xa0, 0x46, - 0x92, 0x15, 0x77, 0x2c, 0x15, 0x5e, 0x68, 0x92, 0xc8, 0xc9, 0x68, 0xee, 0xcc, 0x85, 0xbc, 0x52, - 0x41, 0xdb, 0x8b, 0xdc, 0x66, 0xba, 0x9a, 0x0f, 0x74, 0x2b, 0x10, 0xd6, 0xb6, 0x49, 0xd3, 0x49, - 0x95, 0x7b, 0x3e, 0xaf, 0x5c, 0x3b, 0x72, 0x1b, 0x17, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc8, - 0xfe, 0x86, 0x05, 0x67, 0xe7, 0x6e, 0x54, 0x97, 0x1a, 0x4e, 0x18, 0xb9, 0xb5, 0xf9, 0x86, 0x5f, - 0xdb, 0xa9, 0x46, 0x7e, 0x40, 0xae, 0xfb, 0x8d, 0x76, 0x93, 0x54, 0x59, 0x47, 0xa0, 0x67, 0x60, - 0x68, 0x97, 0xfd, 0x5f, 0x59, 0x9c, 0xb6, 0xce, 0x5a, 0xe7, 0x4b, 0xf3, 0x13, 0xbf, 0xbd, 0x5f, - 0x7e, 0xdf, 0xdd, 0xfd, 0xf2, 0xd0, 0x75, 0x01, 0xc7, 0x8a, 0x02, 0x9d, 0x83, 0x81, 0xcd, 0x70, - 0x63, 0xaf, 0x45, 0xa6, 0x0b, 0x8c, 0x76, 0x4c, 0xd0, 0x0e, 0x2c, 0x57, 0x29, 0x14, 0x0b, 0x2c, - 0xba, 0x00, 0xa5, 0x96, 0x13, 0x44, 0x6e, 0xe4, 0xfa, 0xde, 0x74, 0xf1, 0xac, 0x75, 0xbe, 0x7f, - 0x7e, 0x52, 0x90, 0x96, 0x2a, 0x12, 0x81, 0x63, 0x1a, 0xda, 0x8c, 0x80, 0x38, 0xf5, 0xab, 0x5e, - 0x63, 0x6f, 0xba, 0xef, 0xac, 0x75, 0x7e, 0x28, 0x6e, 0x06, 0x16, 0x70, 0xac, 0x28, 0xec, 0x2f, - 0x17, 0x60, 0x68, 0x6e, 0x73, 0xd3, 0xf5, 0xdc, 0x68, 0x0f, 0x5d, 0x87, 0x11, 0xcf, 0xaf, 0x13, - 0xf9, 0x9f, 0x7d, 0xc5, 0xf0, 0x73, 0x67, 0x67, 0xd3, 0x53, 0x69, 0x76, 0x5d, 0xa3, 0x9b, 0x9f, - 0xb8, 0xbb, 0x5f, 0x1e, 0xd1, 0x21, 0xd8, 0xe0, 0x83, 0x30, 0x0c, 0xb7, 0xfc, 0xba, 0x62, 0x5b, - 0x60, 0x6c, 0xcb, 0x59, 0x6c, 0x2b, 0x31, 0xd9, 0xfc, 0xf8, 0xdd, 0xfd, 0xf2, 0xb0, 0x06, 0xc0, - 0x3a, 0x13, 0x74, 0x13, 0xc6, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdf, 0x22, 0xe3, 0xfb, 0x68, 0x1e, - 0x5f, 0x8d, 0x74, 0x7e, 0xea, 
0xee, 0x7e, 0x79, 0x3c, 0x01, 0xc4, 0x49, 0x86, 0xf6, 0x1d, 0x18, - 0x9b, 0x8b, 0x22, 0xa7, 0xb6, 0x4d, 0xea, 0x7c, 0x04, 0xd1, 0x0b, 0xd0, 0xe7, 0x39, 0x4d, 0x22, - 0xc6, 0xf7, 0xac, 0xe8, 0xd8, 0xbe, 0x75, 0xa7, 0x49, 0xee, 0xed, 0x97, 0x27, 0xae, 0x79, 0xee, - 0xdb, 0x6d, 0x31, 0x2b, 0x28, 0x0c, 0x33, 0x6a, 0xf4, 0x1c, 0x40, 0x9d, 0xec, 0xba, 0x35, 0x52, - 0x71, 0xa2, 0x6d, 0x31, 0xde, 0x48, 0x94, 0x85, 0x45, 0x85, 0xc1, 0x1a, 0x95, 0x7d, 0x1b, 0x4a, - 0x73, 0xbb, 0xbe, 0x5b, 0xaf, 0xf8, 0xf5, 0x10, 0xed, 0xc0, 0x78, 0x2b, 0x20, 0x9b, 0x24, 0x50, - 0xa0, 0x69, 0xeb, 0x6c, 0xf1, 0xfc, 0xf0, 0x73, 0xe7, 0x33, 0x3f, 0xd6, 0x24, 0x5d, 0xf2, 0xa2, - 0x60, 0x6f, 0xfe, 0x21, 0x51, 0xdf, 0x78, 0x02, 0x8b, 0x93, 0x9c, 0xed, 0x7f, 0x5a, 0x80, 0xe3, - 0x73, 0x77, 0xda, 0x01, 0x59, 0x74, 0xc3, 0x9d, 0xe4, 0x0c, 0xaf, 0xbb, 0xe1, 0xce, 0x7a, 0xdc, - 0x03, 0x6a, 0x6a, 0x2d, 0x0a, 0x38, 0x56, 0x14, 0xe8, 0x59, 0x18, 0xa4, 0xbf, 0xaf, 0xe1, 0x15, - 0xf1, 0xc9, 0x53, 0x82, 0x78, 0x78, 0xd1, 0x89, 0x9c, 0x45, 0x8e, 0xc2, 0x92, 0x06, 0xad, 0xc1, - 0x70, 0x8d, 0x2d, 0xc8, 0xad, 0x35, 0xbf, 0x4e, 0xd8, 0x60, 0x96, 0xe6, 0x9f, 0xa6, 0xe4, 0x0b, - 0x31, 0xf8, 0xde, 0x7e, 0x79, 0x9a, 0xb7, 0x4d, 0xb0, 0xd0, 0x70, 0x58, 0x2f, 0x8f, 0x6c, 0xb5, - 0xbe, 0xfa, 0x18, 0x27, 0xc8, 0x58, 0x5b, 0xe7, 0xb5, 0xa5, 0xd2, 0xcf, 0x96, 0xca, 0x48, 0xf6, - 0x32, 0x41, 0x17, 0xa1, 0x6f, 0xc7, 0xf5, 0xea, 0xd3, 0x03, 0x8c, 0xd7, 0x69, 0x3a, 0xe6, 0x57, - 0x5c, 0xaf, 0x7e, 0x6f, 0xbf, 0x3c, 0x69, 0x34, 0x87, 0x02, 0x31, 0x23, 0xb5, 0xff, 0xb3, 0x05, - 0x65, 0x86, 0x5b, 0x76, 0x1b, 0xa4, 0x42, 0x82, 0xd0, 0x0d, 0x23, 0xe2, 0x45, 0x46, 0x87, 0x3e, - 0x07, 0x10, 0x92, 0x5a, 0x40, 0x22, 0xad, 0x4b, 0xd5, 0xc4, 0xa8, 0x2a, 0x0c, 0xd6, 0xa8, 0xe8, - 0x86, 0x10, 0x6e, 0x3b, 0x01, 0x9b, 0x5f, 0xa2, 0x63, 0xd5, 0x86, 0x50, 0x95, 0x08, 0x1c, 0xd3, - 0x18, 0x1b, 0x42, 0xb1, 0xdb, 0x86, 0x80, 0x3e, 0x0c, 0xe3, 0x71, 0x65, 0x61, 0xcb, 0xa9, 0xc9, - 0x0e, 0x64, 0x4b, 0xa6, 0x6a, 0xa2, 0x70, 0x92, 0xd6, 0xfe, 0xdb, 0x96, 0x98, 0x3c, 0xf4, 0xab, - 0xdf, 0xe5, 0xdf, 0x6a, 0xff, 0x8a, 0x05, 0x83, 0xf3, 0xae, 0x57, 0x77, 0xbd, 0x2d, 0xf4, 0x29, - 0x18, 0xa2, 0x67, 0x53, 0xdd, 0x89, 0x1c, 0xb1, 0xef, 0xbd, 0x5f, 0x5b, 0x5b, 0xea, 0xa8, 0x98, - 0x6d, 0xed, 0x6c, 0x51, 0x40, 0x38, 0x4b, 0xa9, 0xe9, 0x6a, 0xbb, 0x7a, 0xf3, 0xd3, 0xa4, 0x16, - 0xad, 0x91, 0xc8, 0x89, 0x3f, 0x27, 0x86, 0x61, 0xc5, 0x15, 0x5d, 0x81, 0x81, 0xc8, 0x09, 0xb6, - 0x48, 0x24, 0x36, 0xc0, 0xcc, 0x8d, 0x8a, 0x97, 0xc4, 0x74, 0x45, 0x12, 0xaf, 0x46, 0xe2, 0x63, - 0x61, 0x83, 0x15, 0xc5, 0x82, 0x85, 0xfd, 0x3f, 0x06, 0xe1, 0xe4, 0x42, 0x75, 0x25, 0x67, 0x5e, - 0x9d, 0x83, 0x81, 0x7a, 0xe0, 0xee, 0x92, 0x40, 0xf4, 0xb3, 0xe2, 0xb2, 0xc8, 0xa0, 0x58, 0x60, - 0xd1, 0x4b, 0x30, 0xc2, 0x0f, 0xa4, 0xcb, 0x8e, 0x57, 0x6f, 0xc8, 0x2e, 0x3e, 0x26, 0xa8, 0x47, - 0xae, 0x6b, 0x38, 0x6c, 0x50, 0x1e, 0x70, 0x52, 0x9d, 0x4b, 0x2c, 0xc6, 0xbc, 0xc3, 0xee, 0x0b, - 0x16, 0x4c, 0xf0, 0x6a, 0xe6, 0xa2, 0x28, 0x70, 0x6f, 0xb6, 0x23, 0x12, 0x4e, 0xf7, 0xb3, 0x9d, - 0x6e, 0x21, 0xab, 0xb7, 0x72, 0x7b, 0x60, 0xf6, 0x7a, 0x82, 0x0b, 0xdf, 0x04, 0xa7, 0x45, 0xbd, - 0x13, 0x49, 0x34, 0x4e, 0x55, 0x8b, 0xbe, 0xcf, 0x82, 0x99, 0x9a, 0xef, 0x45, 0x81, 0xdf, 0x68, - 0x90, 0xa0, 0xd2, 0xbe, 0xd9, 0x70, 0xc3, 0x6d, 0x3e, 0x4f, 0x31, 0xd9, 0x64, 0x3b, 0x41, 0xce, - 0x18, 0x2a, 0x22, 0x31, 0x86, 0x67, 0xee, 0xee, 0x97, 0x67, 0x16, 0x72, 0x59, 0xe1, 0x0e, 0xd5, - 0xa0, 0x1d, 0x40, 0xf4, 0x28, 0xad, 0x46, 0xce, 0x16, 0x89, 0x2b, 0x1f, 0xec, 0xbd, 0xf2, 0x13, - 0x77, 0xf7, 0xcb, 0x68, 0x3d, 0xc5, 0x02, 0x67, 0xb0, 
0x45, 0x6f, 0xc3, 0x31, 0x0a, 0x4d, 0x7d, - 0xeb, 0x50, 0xef, 0xd5, 0x4d, 0xdf, 0xdd, 0x2f, 0x1f, 0x5b, 0xcf, 0x60, 0x82, 0x33, 0x59, 0xa3, - 0xef, 0xb1, 0xe0, 0x64, 0xfc, 0xf9, 0x4b, 0xb7, 0x5b, 0x8e, 0x57, 0x8f, 0x2b, 0x2e, 0xf5, 0x5e, - 0x31, 0xdd, 0x93, 0x4f, 0x2e, 0xe4, 0x71, 0xc2, 0xf9, 0x95, 0x20, 0x0f, 0xa6, 0x68, 0xd3, 0x92, - 0x75, 0x43, 0xef, 0x75, 0x3f, 0x74, 0x77, 0xbf, 0x3c, 0xb5, 0x9e, 0xe6, 0x81, 0xb3, 0x18, 0xcf, - 0x2c, 0xc0, 0xf1, 0xcc, 0xd9, 0x89, 0x26, 0xa0, 0xb8, 0x43, 0xb8, 0xd4, 0x55, 0xc2, 0xf4, 0x27, - 0x3a, 0x06, 0xfd, 0xbb, 0x4e, 0xa3, 0x2d, 0x16, 0x26, 0xe6, 0x7f, 0x5e, 0x2e, 0xbc, 0x64, 0xd9, - 0xff, 0xac, 0x08, 0xe3, 0x0b, 0xd5, 0x95, 0xfb, 0x5a, 0xf5, 0xfa, 0xb1, 0x57, 0xe8, 0x78, 0xec, - 0xc5, 0x87, 0x68, 0x31, 0xf7, 0x10, 0xfd, 0xee, 0x8c, 0x25, 0xdb, 0xc7, 0x96, 0xec, 0x87, 0x72, - 0x96, 0xec, 0x21, 0x2f, 0xd4, 0xdd, 0x9c, 0x59, 0xdb, 0xcf, 0x06, 0x30, 0x53, 0x42, 0x5a, 0xf5, - 0x6b, 0x4e, 0x23, 0xb9, 0xd5, 0x1e, 0x70, 0xea, 0x1e, 0xce, 0x38, 0xd6, 0x60, 0x64, 0xc1, 0x69, - 0x39, 0x37, 0xdd, 0x86, 0x1b, 0xb9, 0x24, 0x44, 0x4f, 0x40, 0xd1, 0xa9, 0xd7, 0x99, 0x74, 0x57, - 0x9a, 0x3f, 0x7e, 0x77, 0xbf, 0x5c, 0x9c, 0xab, 0x53, 0x31, 0x03, 0x14, 0xd5, 0x1e, 0xa6, 0x14, - 0xe8, 0x29, 0xe8, 0xab, 0x07, 0x7e, 0x6b, 0xba, 0xc0, 0x28, 0xe9, 0x2a, 0xef, 0x5b, 0x0c, 0xfc, - 0x56, 0x82, 0x94, 0xd1, 0xd8, 0xbf, 0x55, 0x80, 0x53, 0x0b, 0xa4, 0xb5, 0xbd, 0x5c, 0xcd, 0x39, - 0x2f, 0xce, 0xc3, 0x50, 0xd3, 0xf7, 0xdc, 0xc8, 0x0f, 0x42, 0x51, 0x35, 0x9b, 0x11, 0x6b, 0x02, - 0x86, 0x15, 0x16, 0x9d, 0x85, 0xbe, 0x56, 0x2c, 0xc4, 0x8e, 0x48, 0x01, 0x98, 0x89, 0xaf, 0x0c, - 0x43, 0x29, 0xda, 0x21, 0x09, 0xc4, 0x8c, 0x51, 0x14, 0xd7, 0x42, 0x12, 0x60, 0x86, 0x89, 0x25, - 0x01, 0x2a, 0x23, 0x88, 0x13, 0x21, 0x21, 0x09, 0x50, 0x0c, 0xd6, 0xa8, 0x50, 0x05, 0x4a, 0x61, - 0x62, 0x64, 0x7b, 0x5a, 0x9a, 0xa3, 0x4c, 0x54, 0x50, 0x23, 0x19, 0x33, 0x31, 0x4e, 0xb0, 0x81, - 0xae, 0xa2, 0xc2, 0xd7, 0x0b, 0x80, 0x78, 0x17, 0x7e, 0x9b, 0x75, 0xdc, 0xb5, 0x74, 0xc7, 0xf5, - 0xbe, 0x24, 0x0e, 0xab, 0xf7, 0xfe, 0x8b, 0x05, 0xa7, 0x16, 0x5c, 0xaf, 0x4e, 0x82, 0x9c, 0x09, - 0xf8, 0x60, 0xee, 0xce, 0x07, 0x13, 0x52, 0x8c, 0x29, 0xd6, 0x77, 0x08, 0x53, 0xcc, 0xfe, 0x4b, - 0x0b, 0x10, 0xff, 0xec, 0x77, 0xdd, 0xc7, 0x5e, 0x4b, 0x7f, 0xec, 0x21, 0x4c, 0x0b, 0xfb, 0xef, - 0x5a, 0x30, 0xbc, 0xd0, 0x70, 0xdc, 0xa6, 0xf8, 0xd4, 0x05, 0x98, 0x94, 0x8a, 0x22, 0x06, 0xd6, - 0x64, 0x7f, 0xba, 0xb9, 0x4d, 0xe2, 0x24, 0x12, 0xa7, 0xe9, 0xd1, 0xc7, 0xe1, 0xa4, 0x01, 0xdc, - 0x20, 0xcd, 0x56, 0xc3, 0x89, 0xf4, 0x5b, 0x01, 0x3b, 0xfd, 0x71, 0x1e, 0x11, 0xce, 0x2f, 0x6f, - 0xaf, 0xc2, 0xd8, 0x42, 0xc3, 0x25, 0x5e, 0xb4, 0x52, 0x59, 0xf0, 0xbd, 0x4d, 0x77, 0x0b, 0xbd, - 0x0c, 0x63, 0x91, 0xdb, 0x24, 0x7e, 0x3b, 0xaa, 0x92, 0x9a, 0xef, 0xb1, 0xbb, 0xb6, 0x75, 0xbe, - 0x7f, 0x1e, 0xdd, 0xdd, 0x2f, 0x8f, 0x6d, 0x18, 0x18, 0x9c, 0xa0, 0xb4, 0x7f, 0x9a, 0xee, 0xb4, - 0x8d, 0x76, 0x18, 0x91, 0x60, 0x23, 0x68, 0x87, 0xd1, 0x7c, 0x9b, 0x4a, 0xcb, 0x95, 0xc0, 0xa7, - 0x1d, 0xe8, 0xfa, 0x1e, 0x3a, 0x65, 0x28, 0x10, 0x86, 0xa4, 0xf2, 0x40, 0x28, 0x0a, 0x66, 0x01, - 0x42, 0x77, 0xcb, 0x23, 0x81, 0xf6, 0x69, 0x63, 0x6c, 0x71, 0x2b, 0x28, 0xd6, 0x28, 0x50, 0x03, - 0x46, 0x1b, 0xce, 0x4d, 0xd2, 0xa8, 0x92, 0x06, 0xa9, 0x45, 0x7e, 0x20, 0x54, 0x20, 0xcf, 0xf7, - 0x76, 0x73, 0x59, 0xd5, 0x8b, 0xce, 0x4f, 0xde, 0xdd, 0x2f, 0x8f, 0x1a, 0x20, 0x6c, 0x32, 0xa7, - 0x9b, 0x9d, 0xdf, 0xa2, 0x5f, 0xe1, 0x34, 0xf4, 0xeb, 0xf2, 0x55, 0x01, 0xc3, 0x0a, 0xab, 0x36, - 0xbb, 0xbe, 0xbc, 0xcd, 0xce, 0xfe, 0x63, 0xba, 0x34, 0xfc, 0x66, 0xcb, 0xf7, 
0x88, 0x17, 0x2d, - 0xf8, 0x5e, 0x9d, 0x2b, 0xaf, 0x5e, 0x86, 0xbe, 0x88, 0x4e, 0x75, 0xde, 0x3d, 0xe7, 0x64, 0x41, - 0x3a, 0xc1, 0xef, 0xed, 0x97, 0x4f, 0xa4, 0x4b, 0xb0, 0x25, 0xc0, 0xca, 0xa0, 0x0f, 0xc1, 0x40, - 0x18, 0x39, 0x51, 0x3b, 0x14, 0x1d, 0xf7, 0x88, 0x5c, 0x28, 0x55, 0x06, 0xbd, 0xb7, 0x5f, 0x1e, - 0x57, 0xc5, 0x38, 0x08, 0x8b, 0x02, 0xe8, 0x49, 0x18, 0x6c, 0x92, 0x30, 0x74, 0xb6, 0xa4, 0xa0, - 0x33, 0x2e, 0xca, 0x0e, 0xae, 0x71, 0x30, 0x96, 0x78, 0xf4, 0x28, 0xf4, 0x93, 0x20, 0xf0, 0x03, - 0xf1, 0x6d, 0xa3, 0x82, 0xb0, 0x7f, 0x89, 0x02, 0x31, 0xc7, 0xd9, 0xff, 0xd2, 0x82, 0x71, 0xd5, - 0x56, 0x5e, 0xd7, 0x11, 0x5c, 0x30, 0xdf, 0x04, 0xa8, 0xc9, 0x0f, 0x0c, 0x99, 0x60, 0x30, 0xfc, - 0xdc, 0xb9, 0x4c, 0x19, 0x2c, 0xd5, 0x8d, 0x31, 0x67, 0x05, 0x0a, 0xb1, 0xc6, 0xcd, 0xfe, 0x75, - 0x0b, 0xa6, 0x12, 0x5f, 0xb4, 0xea, 0x86, 0x11, 0x7a, 0x2b, 0xf5, 0x55, 0xb3, 0x3d, 0x4e, 0x3e, - 0x37, 0xe4, 0xdf, 0xa4, 0x76, 0x29, 0x09, 0xd1, 0xbe, 0xe8, 0x32, 0xf4, 0xbb, 0x11, 0x69, 0xca, - 0x8f, 0x79, 0xb4, 0xe3, 0xc7, 0xf0, 0x56, 0xc5, 0x23, 0xb2, 0x42, 0x4b, 0x62, 0xce, 0xc0, 0xfe, - 0xad, 0x22, 0x94, 0xf8, 0xfa, 0x5e, 0x73, 0x5a, 0x47, 0x30, 0x16, 0x4f, 0x43, 0xc9, 0x6d, 0x36, - 0xdb, 0x91, 0x73, 0x53, 0x9c, 0xd4, 0x43, 0x7c, 0xd7, 0x5c, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0x0a, - 0xf4, 0xb1, 0xa6, 0xf0, 0xaf, 0x7c, 0x22, 0xfb, 0x2b, 0x45, 0xdb, 0x67, 0x17, 0x9d, 0xc8, 0xe1, - 0x42, 0xb2, 0x5a, 0x57, 0x14, 0x84, 0x19, 0x0b, 0xe4, 0x00, 0xdc, 0x74, 0x3d, 0x27, 0xd8, 0xa3, - 0xb0, 0xe9, 0x22, 0x63, 0xf8, 0x6c, 0x67, 0x86, 0xf3, 0x8a, 0x9e, 0xb3, 0x55, 0x1f, 0x16, 0x23, - 0xb0, 0xc6, 0x74, 0xe6, 0x83, 0x50, 0x52, 0xc4, 0x07, 0x91, 0x75, 0x67, 0x3e, 0x0c, 0xe3, 0x89, - 0xba, 0xba, 0x15, 0x1f, 0xd1, 0x45, 0xe5, 0x5f, 0x65, 0x5b, 0x86, 0x68, 0xf5, 0x92, 0xb7, 0x2b, - 0x8e, 0x98, 0x3b, 0x70, 0xac, 0x91, 0x71, 0x48, 0x89, 0x71, 0xed, 0xfd, 0x50, 0x3b, 0x25, 0x3e, - 0xfb, 0x58, 0x16, 0x16, 0x67, 0xd6, 0x61, 0xec, 0x88, 0x85, 0x4e, 0x3b, 0x22, 0xdd, 0xef, 0x8e, - 0xa9, 0xc6, 0x5f, 0x21, 0x7b, 0x6a, 0x53, 0xfd, 0x56, 0x36, 0xff, 0x34, 0xef, 0x7d, 0xbe, 0x5d, - 0x0e, 0x0b, 0x06, 0xc5, 0x2b, 0x64, 0x8f, 0x0f, 0x85, 0xfe, 0x75, 0xc5, 0x8e, 0x5f, 0xf7, 0x55, - 0x0b, 0x46, 0xd5, 0xd7, 0x1d, 0xc1, 0xbe, 0x30, 0x6f, 0xee, 0x0b, 0xa7, 0x3b, 0x4e, 0xf0, 0x9c, - 0x1d, 0xe1, 0xeb, 0x05, 0x38, 0xa9, 0x68, 0xe8, 0xb5, 0x8f, 0xff, 0x11, 0xb3, 0xea, 0x02, 0x94, - 0x3c, 0xa5, 0x00, 0xb5, 0x4c, 0xcd, 0x63, 0xac, 0xfe, 0x8c, 0x69, 0xe8, 0x91, 0xe7, 0xc5, 0x87, - 0xf6, 0x88, 0x6e, 0x19, 0x10, 0x87, 0xfb, 0x3c, 0x14, 0xdb, 0x6e, 0x5d, 0x1c, 0x30, 0xef, 0x97, - 0xbd, 0x7d, 0x6d, 0x65, 0xf1, 0xde, 0x7e, 0xf9, 0x91, 0x3c, 0xab, 0x14, 0x3d, 0xd9, 0xc2, 0xd9, - 0x6b, 0x2b, 0x8b, 0x98, 0x16, 0x46, 0x73, 0x30, 0x2e, 0x45, 0x99, 0xeb, 0x54, 0x92, 0xf6, 0x3d, - 0x71, 0x0e, 0x29, 0xf5, 0x3e, 0x36, 0xd1, 0x38, 0x49, 0x8f, 0x16, 0x61, 0x62, 0xa7, 0x7d, 0x93, - 0x34, 0x48, 0xc4, 0x3f, 0xf8, 0x0a, 0xe1, 0xca, 0xef, 0x52, 0x7c, 0xe9, 0xbe, 0x92, 0xc0, 0xe3, - 0x54, 0x09, 0xfb, 0x6f, 0xd8, 0x79, 0x20, 0x7a, 0x4f, 0x93, 0x6f, 0xbe, 0x95, 0xd3, 0xb9, 0x97, - 0x59, 0x71, 0x85, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x9e, 0x15, 0xc6, 0x9c, 0xef, 0xeb, 0x38, - 0xe7, 0x7f, 0xb1, 0x00, 0xc7, 0x55, 0x0f, 0x18, 0xf2, 0xfd, 0xb7, 0x7b, 0x1f, 0x5c, 0x84, 0xe1, - 0x3a, 0xd9, 0x74, 0xda, 0x8d, 0x48, 0x59, 0x62, 0xfa, 0xb9, 0x35, 0x6e, 0x31, 0x06, 0x63, 0x9d, - 0xe6, 0x00, 0xdd, 0xf6, 0xf3, 0xa3, 0xec, 0x20, 0x8e, 0x1c, 0x3a, 0xc7, 0xd5, 0xaa, 0xb1, 0x72, - 0x57, 0xcd, 0xa3, 0xd0, 0xef, 0x36, 0xa9, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x15, 0x0a, 0xc4, 0x1c, - 0x87, 
0x1e, 0x87, 0xc1, 0x9a, 0xdf, 0x6c, 0x3a, 0x5e, 0x9d, 0x1d, 0x79, 0xa5, 0xf9, 0x61, 0x2a, - 0xbb, 0x2d, 0x70, 0x10, 0x96, 0x38, 0x2a, 0x7c, 0x3b, 0xc1, 0x16, 0x57, 0x4f, 0x09, 0xe1, 0x7b, - 0x2e, 0xd8, 0x0a, 0x31, 0x83, 0xd2, 0xdb, 0xf5, 0x2d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x45, 0x37, - 0x10, 0x4b, 0x42, 0x9d, 0x85, 0x37, 0x14, 0x06, 0x6b, 0x54, 0x68, 0x19, 0xfa, 0x5b, 0x7e, 0x10, - 0x85, 0xd3, 0x03, 0xac, 0xbb, 0x1f, 0xc9, 0xd9, 0x88, 0xf8, 0xd7, 0x56, 0xfc, 0x20, 0x8a, 0x3f, - 0x80, 0xfe, 0x0b, 0x31, 0x2f, 0x8e, 0x56, 0x61, 0x90, 0x78, 0xbb, 0xcb, 0x81, 0xdf, 0x9c, 0x9e, - 0xca, 0xe7, 0xb4, 0xc4, 0x49, 0xf8, 0x34, 0x8b, 0x65, 0x54, 0x01, 0xc6, 0x92, 0x05, 0xfa, 0x10, - 0x14, 0x89, 0xb7, 0x3b, 0x3d, 0xc8, 0x38, 0xcd, 0xe4, 0x70, 0xba, 0xee, 0x04, 0xf1, 0x9e, 0xbf, - 0xe4, 0xed, 0x62, 0x5a, 0x06, 0x7d, 0x0c, 0x4a, 0x72, 0xc3, 0x08, 0x85, 0xde, 0x37, 0x73, 0xc2, - 0xca, 0x6d, 0x06, 0x93, 0xb7, 0xdb, 0x6e, 0x40, 0x9a, 0xc4, 0x8b, 0xc2, 0x78, 0x87, 0x94, 0xd8, - 0x10, 0xc7, 0xdc, 0x50, 0x0d, 0x46, 0x02, 0x12, 0xba, 0x77, 0x48, 0xc5, 0x6f, 0xb8, 0xb5, 0xbd, - 0xe9, 0x87, 0x58, 0xf3, 0x9e, 0xec, 0xd8, 0x65, 0x58, 0x2b, 0x10, 0xdb, 0x25, 0x74, 0x28, 0x36, - 0x98, 0xa2, 0x37, 0x60, 0x34, 0x20, 0x61, 0xe4, 0x04, 0x91, 0xa8, 0x65, 0x5a, 0xd9, 0x11, 0x47, - 0xb1, 0x8e, 0xe0, 0xd7, 0x89, 0xb8, 0x9a, 0x18, 0x83, 0x4d, 0x0e, 0xe8, 0x63, 0xd2, 0x48, 0xb2, - 0xe6, 0xb7, 0xbd, 0x28, 0x9c, 0x2e, 0xb1, 0x76, 0x67, 0x9a, 0xaf, 0xaf, 0xc7, 0x74, 0x49, 0x2b, - 0x0a, 0x2f, 0x8c, 0x0d, 0x56, 0xe8, 0x13, 0x30, 0xca, 0xff, 0x73, 0x23, 0x70, 0x38, 0x7d, 0x9c, - 0xf1, 0x3e, 0x9b, 0xcf, 0x9b, 0x13, 0xce, 0x1f, 0x17, 0xcc, 0x47, 0x75, 0x68, 0x88, 0x4d, 0x6e, - 0x08, 0xc3, 0x68, 0xc3, 0xdd, 0x25, 0x1e, 0x09, 0xc3, 0x4a, 0xe0, 0xdf, 0x24, 0x42, 0xa7, 0x7d, - 0x32, 0xdb, 0x68, 0xec, 0xdf, 0x24, 0xe2, 0x12, 0xa8, 0x97, 0xc1, 0x26, 0x0b, 0x74, 0x0d, 0xc6, - 0x02, 0xe2, 0xd4, 0xdd, 0x98, 0xe9, 0x70, 0x37, 0xa6, 0xec, 0xe2, 0x8c, 0x8d, 0x42, 0x38, 0xc1, - 0x04, 0x5d, 0x85, 0x11, 0xd6, 0xe7, 0xed, 0x16, 0x67, 0x7a, 0xa2, 0x1b, 0x53, 0xe6, 0x73, 0x50, - 0xd5, 0x8a, 0x60, 0x83, 0x01, 0x7a, 0x1d, 0x4a, 0x0d, 0x77, 0x93, 0xd4, 0xf6, 0x6a, 0x0d, 0x32, - 0x3d, 0xc2, 0xb8, 0x65, 0x6e, 0x86, 0xab, 0x92, 0x88, 0xcb, 0xe7, 0xea, 0x2f, 0x8e, 0x8b, 0xa3, - 0xeb, 0x70, 0x22, 0x22, 0x41, 0xd3, 0xf5, 0x1c, 0xba, 0x89, 0x89, 0x2b, 0x21, 0xb3, 0xe5, 0x8f, - 0xb2, 0xd9, 0x75, 0x46, 0x8c, 0xc6, 0x89, 0x8d, 0x4c, 0x2a, 0x9c, 0x53, 0x1a, 0xdd, 0x86, 0xe9, - 0x0c, 0x0c, 0x9f, 0xb7, 0xc7, 0x18, 0xe7, 0x57, 0x05, 0xe7, 0xe9, 0x8d, 0x1c, 0xba, 0x7b, 0x1d, - 0x70, 0x38, 0x97, 0x3b, 0xba, 0x0a, 0xe3, 0x6c, 0xe7, 0xac, 0xb4, 0x1b, 0x0d, 0x51, 0xe1, 0x18, - 0xab, 0xf0, 0x71, 0x29, 0x47, 0xac, 0x98, 0xe8, 0x7b, 0xfb, 0x65, 0x88, 0xff, 0xe1, 0x64, 0x69, - 0x74, 0x93, 0x99, 0x8d, 0xdb, 0x81, 0x1b, 0xed, 0xd1, 0x55, 0x45, 0x6e, 0x47, 0xd3, 0xe3, 0x1d, - 0x55, 0x68, 0x3a, 0xa9, 0xb2, 0x2d, 0xeb, 0x40, 0x9c, 0x64, 0x48, 0x8f, 0x82, 0x30, 0xaa, 0xbb, - 0xde, 0xf4, 0x04, 0xbf, 0x4f, 0xc9, 0x9d, 0xb4, 0x4a, 0x81, 0x98, 0xe3, 0x98, 0xc9, 0x98, 0xfe, - 0xb8, 0x4a, 0x4f, 0xdc, 0x49, 0x46, 0x18, 0x9b, 0x8c, 0x25, 0x02, 0xc7, 0x34, 0x54, 0x08, 0x8e, - 0xa2, 0xbd, 0x69, 0xc4, 0x48, 0xd5, 0x86, 0xb8, 0xb1, 0xf1, 0x31, 0x4c, 0xe1, 0xf6, 0x4d, 0x18, - 0x53, 0xdb, 0x04, 0xeb, 0x13, 0x54, 0x86, 0x7e, 0x26, 0xf6, 0x09, 0x85, 0x6f, 0x89, 0x36, 0x81, - 0x89, 0x84, 0x98, 0xc3, 0x59, 0x13, 0xdc, 0x3b, 0x64, 0x7e, 0x2f, 0x22, 0x5c, 0x17, 0x51, 0xd4, - 0x9a, 0x20, 0x11, 0x38, 0xa6, 0xb1, 0xff, 0x27, 0x17, 0x9f, 0xe3, 0x53, 0xa2, 0x87, 0x73, 0xf1, - 0x19, 0x18, 0xda, 0xf6, 0xc3, 
0x88, 0x52, 0xb3, 0x3a, 0xfa, 0x63, 0x81, 0xf9, 0xb2, 0x80, 0x63, - 0x45, 0x81, 0x5e, 0x81, 0xd1, 0x9a, 0x5e, 0x81, 0x38, 0xd4, 0xd5, 0x36, 0x62, 0xd4, 0x8e, 0x4d, - 0x5a, 0xf4, 0x12, 0x0c, 0x31, 0x37, 0xa8, 0x9a, 0xdf, 0x10, 0xd2, 0xa6, 0x94, 0x4c, 0x86, 0x2a, - 0x02, 0x7e, 0x4f, 0xfb, 0x8d, 0x15, 0x35, 0x3a, 0x07, 0x03, 0xb4, 0x09, 0x2b, 0x15, 0x71, 0x9c, - 0x2a, 0xdd, 0xe5, 0x65, 0x06, 0xc5, 0x02, 0x6b, 0xff, 0xba, 0xc5, 0x64, 0xa9, 0xf4, 0x9e, 0x8f, - 0x2e, 0xb3, 0x43, 0x83, 0x9d, 0x20, 0x9a, 0xee, 0xf0, 0x31, 0xed, 0x24, 0x50, 0xb8, 0x7b, 0x89, - 0xff, 0xd8, 0x28, 0x89, 0xde, 0x4c, 0x9e, 0x0c, 0x5c, 0xa0, 0x78, 0x41, 0x76, 0x41, 0xf2, 0x74, - 0x78, 0x38, 0x3e, 0xe2, 0x68, 0x7b, 0x3a, 0x1d, 0x11, 0xf6, 0xff, 0x55, 0xd0, 0x66, 0x49, 0x35, - 0x72, 0x22, 0x82, 0x2a, 0x30, 0x78, 0xcb, 0x71, 0x23, 0xd7, 0xdb, 0x12, 0x72, 0x5f, 0xe7, 0x83, - 0x8e, 0x15, 0xba, 0xc1, 0x0b, 0x70, 0xe9, 0x45, 0xfc, 0xc1, 0x92, 0x0d, 0xe5, 0x18, 0xb4, 0x3d, - 0x8f, 0x72, 0x2c, 0xf4, 0xca, 0x11, 0xf3, 0x02, 0x9c, 0xa3, 0xf8, 0x83, 0x25, 0x1b, 0xf4, 0x16, - 0x80, 0xdc, 0x21, 0x48, 0x5d, 0xe8, 0x0e, 0x9f, 0xe9, 0xce, 0x74, 0x43, 0x95, 0xe1, 0xca, 0xc9, - 0xf8, 0x3f, 0xd6, 0xf8, 0xd9, 0x91, 0x36, 0xa6, 0x7a, 0x63, 0xd0, 0xc7, 0xe9, 0x12, 0x75, 0x82, - 0x88, 0xd4, 0xe7, 0x22, 0xd1, 0x39, 0x4f, 0xf5, 0x76, 0x39, 0xdc, 0x70, 0x9b, 0x44, 0x5f, 0xce, - 0x82, 0x09, 0x8e, 0xf9, 0xd9, 0xbf, 0x5c, 0x84, 0xe9, 0xbc, 0xe6, 0xd2, 0x45, 0x43, 0x6e, 0xbb, - 0xd1, 0x02, 0x15, 0x6b, 0x2d, 0x73, 0xd1, 0x2c, 0x09, 0x38, 0x56, 0x14, 0x74, 0xf6, 0x86, 0xee, - 0x96, 0xbc, 0xdb, 0xf7, 0xc7, 0xb3, 0xb7, 0xca, 0xa0, 0x58, 0x60, 0x29, 0x5d, 0x40, 0x9c, 0x50, - 0xf8, 0xe7, 0x69, 0xb3, 0x1c, 0x33, 0x28, 0x16, 0x58, 0x5d, 0xcb, 0xd8, 0xd7, 0x45, 0xcb, 0x68, - 0x74, 0x51, 0xff, 0xe1, 0x76, 0x11, 0xfa, 0x24, 0xc0, 0xa6, 0xeb, 0xb9, 0xe1, 0x36, 0xe3, 0x3e, - 0x70, 0x60, 0xee, 0x4a, 0x28, 0x5e, 0x56, 0x5c, 0xb0, 0xc6, 0x11, 0xbd, 0x08, 0xc3, 0x6a, 0x03, - 0x59, 0x59, 0x64, 0xce, 0x0a, 0x9a, 0xf3, 0x57, 0xbc, 0x9b, 0x2e, 0x62, 0x9d, 0xce, 0xfe, 0x74, - 0x72, 0xbe, 0x88, 0x15, 0xa0, 0xf5, 0xaf, 0xd5, 0x6b, 0xff, 0x16, 0x3a, 0xf7, 0xaf, 0xfd, 0xcd, - 0x01, 0x18, 0x37, 0x2a, 0x6b, 0x87, 0x3d, 0xec, 0xb9, 0x97, 0xe8, 0x01, 0xe4, 0x44, 0x44, 0xac, - 0x3f, 0xbb, 0xfb, 0x52, 0xd1, 0x0f, 0x29, 0xba, 0x02, 0x78, 0x79, 0xf4, 0x49, 0x28, 0x35, 0x9c, - 0x90, 0x69, 0x2c, 0x89, 0x58, 0x77, 0xbd, 0x30, 0x8b, 0x2f, 0x84, 0x4e, 0x18, 0x69, 0xa7, 0x3e, - 0xe7, 0x1d, 0xb3, 0xa4, 0x27, 0x25, 0x95, 0xaf, 0xa4, 0x03, 0xa8, 0x6a, 0x04, 0x15, 0xc2, 0xf6, - 0x30, 0xc7, 0xa1, 0x97, 0xd8, 0xd6, 0x4a, 0x67, 0xc5, 0x02, 0x95, 0x46, 0xd9, 0x34, 0xeb, 0x37, - 0x84, 0x6c, 0x85, 0xc3, 0x06, 0x65, 0x7c, 0x27, 0x1b, 0xe8, 0x70, 0x27, 0x7b, 0x12, 0x06, 0xd9, - 0x0f, 0x35, 0x03, 0xd4, 0x68, 0xac, 0x70, 0x30, 0x96, 0xf8, 0xe4, 0x84, 0x19, 0xea, 0x6d, 0xc2, - 0xd0, 0x5b, 0x9f, 0x98, 0xd4, 0xcc, 0x51, 0x64, 0x88, 0xef, 0x72, 0x62, 0xca, 0x63, 0x89, 0x43, - 0x3f, 0x63, 0x01, 0x72, 0x1a, 0xf4, 0xb6, 0x4c, 0xc1, 0xea, 0x72, 0x03, 0x4c, 0xd4, 0x7e, 0xa5, - 0x6b, 0xb7, 0xb7, 0xc3, 0xd9, 0xb9, 0x54, 0x69, 0xae, 0x29, 0x7d, 0x59, 0x34, 0x11, 0xa5, 0x09, - 0xf4, 0xc3, 0x68, 0xd5, 0x0d, 0xa3, 0xcf, 0xfd, 0x49, 0xe2, 0x70, 0xca, 0x68, 0x12, 0xba, 0xa6, - 0x5f, 0xbe, 0x86, 0x0f, 0x78, 0xf9, 0x1a, 0xcd, 0xbb, 0x78, 0xcd, 0xb4, 0xe1, 0xa1, 0x9c, 0x2f, - 0xc8, 0xd0, 0xbf, 0x2e, 0xea, 0xfa, 0xd7, 0x2e, 0x5a, 0xbb, 0x59, 0x59, 0xc7, 0xec, 0x1b, 0x6d, - 0xc7, 0x8b, 0xdc, 0x68, 0x4f, 0xd7, 0xd7, 0x3e, 0x05, 0x63, 0x8b, 0x0e, 0x69, 0xfa, 0xde, 0x92, - 0x57, 0x6f, 0xf9, 0xae, 0x17, 0xa1, 0x69, 0xe8, 0x63, 
0xc2, 0x07, 0xdf, 0x7a, 0xfb, 0x68, 0xef, - 0x61, 0x06, 0xb1, 0xb7, 0xe0, 0xf8, 0xa2, 0x7f, 0xcb, 0xbb, 0xe5, 0x04, 0xf5, 0xb9, 0xca, 0x8a, - 0xa6, 0x4f, 0x5a, 0x97, 0xfa, 0x0c, 0x2b, 0xff, 0xb6, 0xa8, 0x95, 0xe4, 0xd7, 0xa1, 0x65, 0xb7, - 0x41, 0x72, 0xb4, 0x7e, 0xff, 0x6f, 0xc1, 0xa8, 0x29, 0xa6, 0x57, 0x36, 0x2b, 0x2b, 0xd7, 0x40, - 0xff, 0x06, 0x0c, 0x6d, 0xba, 0xa4, 0x51, 0xc7, 0x64, 0x53, 0xf4, 0xce, 0x13, 0xf9, 0x2e, 0x7c, - 0xcb, 0x94, 0x52, 0x19, 0xd7, 0x98, 0x36, 0x64, 0x59, 0x14, 0xc6, 0x8a, 0x0d, 0xda, 0x81, 0x09, - 0xd9, 0x87, 0x12, 0x2b, 0xf6, 0x83, 0x27, 0x3b, 0x0d, 0xbc, 0xc9, 0xfc, 0xd8, 0xdd, 0xfd, 0xf2, - 0x04, 0x4e, 0xb0, 0xc1, 0x29, 0xc6, 0xe8, 0x14, 0xf4, 0x35, 0xe9, 0xc9, 0xd7, 0xc7, 0xba, 0x9f, - 0xa9, 0x3f, 0x98, 0x26, 0x87, 0x41, 0xed, 0x1f, 0xb3, 0xe0, 0xa1, 0x54, 0xcf, 0x08, 0x8d, 0xd6, - 0x21, 0x8f, 0x42, 0x52, 0xc3, 0x54, 0xe8, 0xae, 0x61, 0xb2, 0xff, 0x8e, 0x05, 0xc7, 0x96, 0x9a, - 0xad, 0x68, 0x6f, 0xd1, 0x35, 0xad, 0xe9, 0x1f, 0x84, 0x81, 0x26, 0xa9, 0xbb, 0xed, 0xa6, 0x18, - 0xb9, 0xb2, 0x3c, 0x1d, 0xd6, 0x18, 0xf4, 0xde, 0x7e, 0x79, 0xb4, 0x1a, 0xf9, 0x81, 0xb3, 0x45, - 0x38, 0x00, 0x0b, 0x72, 0x76, 0xc6, 0xba, 0x77, 0xc8, 0xaa, 0xdb, 0x74, 0xa3, 0xfb, 0x9b, 0xed, - 0xc2, 0x10, 0x2e, 0x99, 0xe0, 0x98, 0x9f, 0xfd, 0x0d, 0x0b, 0xc6, 0xe5, 0xbc, 0x9f, 0xab, 0xd7, - 0x03, 0x12, 0x86, 0x68, 0x06, 0x0a, 0x6e, 0x4b, 0xb4, 0x12, 0x44, 0x2b, 0x0b, 0x2b, 0x15, 0x5c, - 0x70, 0x5b, 0x52, 0x9c, 0x67, 0x07, 0x50, 0xd1, 0xf4, 0x09, 0xb8, 0x2c, 0xe0, 0x58, 0x51, 0xa0, - 0xf3, 0x30, 0xe4, 0xf9, 0x75, 0x2e, 0x11, 0x0b, 0x1b, 0x2b, 0xa5, 0x5c, 0x17, 0x30, 0xac, 0xb0, - 0xa8, 0x02, 0x25, 0xee, 0x31, 0x1a, 0x4f, 0xda, 0x9e, 0xfc, 0x4e, 0xd9, 0x97, 0x6d, 0xc8, 0x92, - 0x38, 0x66, 0x62, 0xff, 0xa6, 0x05, 0x23, 0xf2, 0xcb, 0x7a, 0xbc, 0xab, 0xd0, 0xa5, 0x15, 0xdf, - 0x53, 0xe2, 0xa5, 0x45, 0xef, 0x1a, 0x0c, 0x63, 0x5c, 0x31, 0x8a, 0x07, 0xba, 0x62, 0x5c, 0x84, - 0x61, 0xa7, 0xd5, 0xaa, 0x98, 0xf7, 0x13, 0x36, 0x95, 0xe6, 0x62, 0x30, 0xd6, 0x69, 0xec, 0x1f, - 0x2d, 0xc0, 0x98, 0xfc, 0x82, 0x6a, 0xfb, 0x66, 0x48, 0x22, 0xb4, 0x01, 0x25, 0x87, 0x8f, 0x12, - 0x91, 0x93, 0xfc, 0xd1, 0x6c, 0xbd, 0x99, 0x31, 0xa4, 0xb1, 0xa0, 0x35, 0x27, 0x4b, 0xe3, 0x98, - 0x11, 0x6a, 0xc0, 0xa4, 0xe7, 0x47, 0xec, 0xd0, 0x55, 0xf8, 0x4e, 0xa6, 0xcc, 0x24, 0xf7, 0x93, - 0x82, 0xfb, 0xe4, 0x7a, 0x92, 0x0b, 0x4e, 0x33, 0x46, 0x4b, 0x52, 0x17, 0x59, 0xcc, 0x57, 0x22, - 0xe9, 0x03, 0x97, 0xad, 0x8a, 0xb4, 0x7f, 0xcd, 0x82, 0x92, 0x24, 0x3b, 0x0a, 0xab, 0xf5, 0x1a, - 0x0c, 0x86, 0x6c, 0x10, 0x64, 0xd7, 0xd8, 0x9d, 0x1a, 0xce, 0xc7, 0x2b, 0x96, 0x25, 0xf8, 0xff, - 0x10, 0x4b, 0x1e, 0xcc, 0x14, 0xa5, 0x9a, 0xff, 0x2e, 0x31, 0x45, 0xa9, 0xf6, 0xe4, 0x1c, 0x4a, - 0x7f, 0xc6, 0xda, 0xac, 0xe9, 0x76, 0xa9, 0xc8, 0xdb, 0x0a, 0xc8, 0xa6, 0x7b, 0x3b, 0x29, 0xf2, - 0x56, 0x18, 0x14, 0x0b, 0x2c, 0x7a, 0x0b, 0x46, 0x6a, 0xd2, 0x06, 0x11, 0xaf, 0xf0, 0x73, 0x1d, - 0xed, 0x61, 0xca, 0x74, 0xca, 0x75, 0x68, 0x0b, 0x5a, 0x79, 0x6c, 0x70, 0x33, 0x3d, 0xa2, 0x8a, - 0xdd, 0x3c, 0xa2, 0x62, 0xbe, 0xf9, 0xfe, 0x41, 0x3f, 0x6e, 0xc1, 0x00, 0xd7, 0x3d, 0xf7, 0xa6, - 0xfa, 0xd7, 0x2c, 0xc9, 0x71, 0xdf, 0x5d, 0xa7, 0x40, 0x21, 0x69, 0xa0, 0x35, 0x28, 0xb1, 0x1f, - 0x4c, 0x77, 0x5e, 0xcc, 0x7f, 0xb0, 0xc4, 0x6b, 0xd5, 0x1b, 0x78, 0x5d, 0x16, 0xc3, 0x31, 0x07, - 0xfb, 0x47, 0x8a, 0x74, 0x77, 0x8b, 0x49, 0x8d, 0x43, 0xdf, 0x7a, 0x70, 0x87, 0x7e, 0xe1, 0x41, - 0x1d, 0xfa, 0x5b, 0x30, 0x5e, 0xd3, 0xec, 0xce, 0xf1, 0x48, 0x9e, 0xef, 0x38, 0x49, 0x34, 0x13, - 0x35, 0xd7, 0xce, 0x2d, 0x98, 0x4c, 0x70, 0x92, 0x2b, 0xfa, 0x38, 0x8c, 0xf0, 
0x71, 0x16, 0xb5, - 0x70, 0xa7, 0xb2, 0xc7, 0xf3, 0xe7, 0x8b, 0x5e, 0x05, 0xd7, 0xe6, 0x6a, 0xc5, 0xb1, 0xc1, 0xcc, - 0xfe, 0x2b, 0x0b, 0xd0, 0x52, 0x6b, 0x9b, 0x34, 0x49, 0xe0, 0x34, 0x62, 0xf3, 0xd1, 0x17, 0x2d, - 0x98, 0x26, 0x29, 0xf0, 0x82, 0xdf, 0x6c, 0x8a, 0xcb, 0x62, 0x8e, 0x3e, 0x63, 0x29, 0xa7, 0x8c, - 0x7a, 0xd1, 0x35, 0x9d, 0x47, 0x81, 0x73, 0xeb, 0x43, 0x6b, 0x30, 0xc5, 0x4f, 0x49, 0x85, 0xd0, - 0xbc, 0xb8, 0x1e, 0x16, 0x8c, 0xa7, 0x36, 0xd2, 0x24, 0x38, 0xab, 0x9c, 0xfd, 0x6b, 0xa3, 0x90, - 0xdb, 0x8a, 0xf7, 0xec, 0x66, 0xef, 0xd9, 0xcd, 0xde, 0xb3, 0x9b, 0xbd, 0x67, 0x37, 0x7b, 0xcf, - 0x6e, 0xf6, 0x9e, 0xdd, 0xec, 0x5d, 0x6a, 0x37, 0xfb, 0xbf, 0x2d, 0x38, 0xae, 0x8e, 0x2f, 0xe3, - 0xc2, 0xfe, 0x19, 0x98, 0xe2, 0xcb, 0xcd, 0x70, 0xc6, 0x16, 0xc7, 0xf5, 0xc5, 0xcc, 0x99, 0x9b, - 0x78, 0x34, 0x60, 0x14, 0xe4, 0xaf, 0xaf, 0x32, 0x10, 0x38, 0xab, 0x1a, 0xfb, 0x97, 0x87, 0xa0, - 0x7f, 0x69, 0x97, 0x78, 0xd1, 0x11, 0x5c, 0x6d, 0x6a, 0x30, 0xe6, 0x7a, 0xbb, 0x7e, 0x63, 0x97, - 0xd4, 0x39, 0xfe, 0x20, 0x37, 0xf0, 0x13, 0x82, 0xf5, 0xd8, 0x8a, 0xc1, 0x02, 0x27, 0x58, 0x3e, - 0x08, 0xeb, 0xc3, 0x25, 0x18, 0xe0, 0x87, 0x8f, 0x30, 0x3d, 0x64, 0xee, 0xd9, 0xac, 0x13, 0xc5, - 0x91, 0x1a, 0x5b, 0x46, 0xf8, 0xe1, 0x26, 0x8a, 0xa3, 0x4f, 0xc3, 0xd8, 0xa6, 0x1b, 0x84, 0xd1, - 0x86, 0xdb, 0xa4, 0x47, 0x43, 0xb3, 0x75, 0x1f, 0xd6, 0x06, 0xd5, 0x0f, 0xcb, 0x06, 0x27, 0x9c, - 0xe0, 0x8c, 0xb6, 0x60, 0xb4, 0xe1, 0xe8, 0x55, 0x0d, 0x1e, 0xb8, 0x2a, 0x75, 0x3a, 0xac, 0xea, - 0x8c, 0xb0, 0xc9, 0x97, 0x2e, 0xa7, 0x1a, 0x53, 0x98, 0x0f, 0x31, 0x75, 0x86, 0x5a, 0x4e, 0x5c, - 0x53, 0xce, 0x71, 0x54, 0x40, 0x63, 0x8e, 0xec, 0x25, 0x53, 0x40, 0xd3, 0xdc, 0xd5, 0x3f, 0x05, - 0x25, 0x42, 0xbb, 0x90, 0x32, 0x16, 0x07, 0xcc, 0x85, 0xde, 0xda, 0xba, 0xe6, 0xd6, 0x02, 0xdf, - 0xb4, 0xf3, 0x2c, 0x49, 0x4e, 0x38, 0x66, 0x8a, 0x16, 0x60, 0x20, 0x24, 0x81, 0xab, 0x74, 0xc9, - 0x1d, 0x86, 0x91, 0x91, 0xf1, 0xe7, 0x7d, 0xfc, 0x37, 0x16, 0x45, 0xe9, 0xf4, 0x72, 0x98, 0x2a, - 0x96, 0x1d, 0x06, 0xda, 0xf4, 0x9a, 0x63, 0x50, 0x2c, 0xb0, 0xe8, 0x75, 0x18, 0x0c, 0x48, 0x83, - 0x19, 0x12, 0x47, 0x7b, 0x9f, 0xe4, 0xdc, 0x2e, 0xc9, 0xcb, 0x61, 0xc9, 0x00, 0x5d, 0x01, 0x14, - 0x10, 0x2a, 0xe0, 0xb9, 0xde, 0x96, 0x72, 0xef, 0x16, 0x1b, 0xad, 0x12, 0xa4, 0x71, 0x4c, 0x21, - 0x5f, 0x76, 0xe2, 0x8c, 0x62, 0xe8, 0x12, 0x4c, 0x2a, 0xe8, 0x8a, 0x17, 0x46, 0x0e, 0xdd, 0xe0, - 0xc6, 0x19, 0x2f, 0xa5, 0x5f, 0xc1, 0x49, 0x02, 0x9c, 0x2e, 0x63, 0xff, 0x9c, 0x05, 0xbc, 0x9f, - 0x8f, 0x40, 0xab, 0xf0, 0x9a, 0xa9, 0x55, 0x38, 0x99, 0x3b, 0x72, 0x39, 0x1a, 0x85, 0x9f, 0xb3, - 0x60, 0x58, 0x1b, 0xd9, 0x78, 0xce, 0x5a, 0x1d, 0xe6, 0x6c, 0x1b, 0x26, 0xe8, 0x4c, 0xbf, 0x7a, - 0x33, 0x24, 0xc1, 0x2e, 0xa9, 0xb3, 0x89, 0x59, 0xb8, 0xbf, 0x89, 0xa9, 0x5c, 0x49, 0x57, 0x13, - 0x0c, 0x71, 0xaa, 0x0a, 0xfb, 0x53, 0xb2, 0xa9, 0xca, 0xf3, 0xb6, 0xa6, 0xc6, 0x3c, 0xe1, 0x79, - 0xab, 0x46, 0x15, 0xc7, 0x34, 0x74, 0xa9, 0x6d, 0xfb, 0x61, 0x94, 0xf4, 0xbc, 0xbd, 0xec, 0x87, - 0x11, 0x66, 0x18, 0xfb, 0x79, 0x80, 0xa5, 0xdb, 0xa4, 0xc6, 0x67, 0xac, 0x7e, 0xe9, 0xb1, 0xf2, - 0x2f, 0x3d, 0xf6, 0x1f, 0x58, 0x30, 0xb6, 0xbc, 0x60, 0x9c, 0x5c, 0xb3, 0x00, 0xfc, 0xa6, 0x76, - 0xe3, 0xc6, 0xba, 0x74, 0xff, 0xe0, 0x16, 0x70, 0x05, 0xc5, 0x1a, 0x05, 0x3a, 0x09, 0xc5, 0x46, - 0xdb, 0x13, 0x6a, 0xcf, 0x41, 0x7a, 0x3c, 0xae, 0xb6, 0x3d, 0x4c, 0x61, 0xda, 0xab, 0xae, 0x62, - 0xcf, 0xaf, 0xba, 0xba, 0x46, 0x73, 0x41, 0x65, 0xe8, 0xbf, 0x75, 0xcb, 0xad, 0xf3, 0x37, 0xf3, - 0xc2, 0x35, 0xe5, 0xc6, 0x8d, 0x95, 0xc5, 0x10, 0x73, 0xb8, 0xfd, 0xa5, 0x22, 0xcc, 0x2c, 0x37, - 0xc8, 
0xed, 0x77, 0x18, 0x37, 0xa0, 0xd7, 0x37, 0x69, 0x07, 0x53, 0x20, 0x1d, 0xf4, 0xdd, 0x61, - 0xf7, 0xfe, 0xd8, 0x84, 0x41, 0xee, 0x78, 0x2a, 0xa3, 0x08, 0x64, 0x9a, 0xfb, 0xf2, 0x3b, 0x64, - 0x96, 0x3b, 0xb0, 0x0a, 0x73, 0x9f, 0x3a, 0x30, 0x05, 0x14, 0x4b, 0xe6, 0x33, 0x2f, 0xc3, 0x88, - 0x4e, 0x79, 0xa0, 0x17, 0xc0, 0xdf, 0x5b, 0x84, 0x09, 0xda, 0x82, 0x07, 0x3a, 0x10, 0xd7, 0xd2, - 0x03, 0x71, 0xd8, 0xaf, 0x40, 0xbb, 0x8f, 0xc6, 0x5b, 0xc9, 0xd1, 0xb8, 0x98, 0x37, 0x1a, 0x47, - 0x3d, 0x06, 0xdf, 0x67, 0xc1, 0xd4, 0x72, 0xc3, 0xaf, 0xed, 0x24, 0x5e, 0x6a, 0xbe, 0x08, 0xc3, - 0x74, 0x3b, 0x0e, 0x8d, 0xa0, 0x25, 0x46, 0x18, 0x1b, 0x81, 0xc2, 0x3a, 0x9d, 0x56, 0xec, 0xda, - 0xb5, 0x95, 0xc5, 0xac, 0xe8, 0x37, 0x02, 0x85, 0x75, 0x3a, 0xfb, 0xf7, 0x2c, 0x38, 0x7d, 0x69, - 0x61, 0x29, 0x9e, 0x8a, 0xa9, 0x00, 0x3c, 0xe7, 0x60, 0xa0, 0x55, 0xd7, 0x9a, 0x12, 0xab, 0x85, - 0x17, 0x59, 0x2b, 0x04, 0xf6, 0xdd, 0x12, 0x5c, 0xea, 0x1a, 0xc0, 0x25, 0x5c, 0x59, 0x10, 0xfb, - 0xae, 0xb4, 0x02, 0x59, 0xb9, 0x56, 0xa0, 0xc7, 0x61, 0x90, 0x9e, 0x0b, 0x6e, 0x4d, 0xb6, 0x9b, - 0x1b, 0xf4, 0x39, 0x08, 0x4b, 0x9c, 0xfd, 0xb3, 0x16, 0x4c, 0x5d, 0x72, 0x23, 0x7a, 0x68, 0x27, - 0x23, 0xcc, 0xd0, 0x53, 0x3b, 0x74, 0x23, 0x3f, 0xd8, 0x4b, 0x46, 0x98, 0xc1, 0x0a, 0x83, 0x35, - 0x2a, 0xfe, 0x41, 0xbb, 0x2e, 0x7b, 0x49, 0x51, 0x30, 0xed, 0x6e, 0x58, 0xc0, 0xb1, 0xa2, 0xa0, - 0xfd, 0x55, 0x77, 0x03, 0xa6, 0xb2, 0xdc, 0x13, 0x1b, 0xb7, 0xea, 0xaf, 0x45, 0x89, 0xc0, 0x31, - 0x8d, 0xfd, 0x17, 0x16, 0x94, 0x2f, 0xf1, 0xf7, 0xa0, 0x9b, 0x61, 0xce, 0xa6, 0xfb, 0x3c, 0x94, - 0x88, 0x34, 0x10, 0xc8, 0xb7, 0xb1, 0x52, 0x10, 0x55, 0x96, 0x03, 0x1e, 0xe8, 0x46, 0xd1, 0xf5, - 0xf0, 0x9c, 0xfc, 0x60, 0xef, 0x81, 0x97, 0x01, 0x11, 0xbd, 0x2e, 0x3d, 0xf2, 0x0f, 0x0b, 0x21, - 0xb2, 0x94, 0xc2, 0xe2, 0x8c, 0x12, 0xf6, 0x8f, 0x59, 0x70, 0x5c, 0x7d, 0xf0, 0xbb, 0xee, 0x33, - 0xed, 0xaf, 0x15, 0x60, 0xf4, 0xf2, 0xc6, 0x46, 0xe5, 0x12, 0x89, 0xb4, 0x59, 0xd9, 0xd9, 0xec, - 0x8f, 0x35, 0xeb, 0x65, 0xa7, 0x3b, 0x62, 0x3b, 0x72, 0x1b, 0xb3, 0x3c, 0x80, 0xdc, 0xec, 0x8a, - 0x17, 0x5d, 0x0d, 0xaa, 0x51, 0xe0, 0x7a, 0x5b, 0x99, 0x33, 0x5d, 0xca, 0x2c, 0xc5, 0x3c, 0x99, - 0x05, 0x3d, 0x0f, 0x03, 0x2c, 0x82, 0x9d, 0x1c, 0x84, 0x87, 0xd5, 0x15, 0x8b, 0x41, 0xef, 0xed, - 0x97, 0x4b, 0xd7, 0xf0, 0x0a, 0xff, 0x83, 0x05, 0x29, 0xba, 0x06, 0xc3, 0xdb, 0x51, 0xd4, 0xba, - 0x4c, 0x9c, 0x3a, 0x09, 0xe4, 0x2e, 0x7b, 0x26, 0x6b, 0x97, 0xa5, 0x9d, 0xc0, 0xc9, 0xe2, 0x8d, - 0x29, 0x86, 0x85, 0x58, 0xe7, 0x63, 0x57, 0x01, 0x62, 0xdc, 0x21, 0x19, 0x6e, 0xec, 0x0d, 0x28, - 0xd1, 0xcf, 0x9d, 0x6b, 0xb8, 0x4e, 0x67, 0xd3, 0xf8, 0xd3, 0x50, 0x92, 0x86, 0xef, 0x50, 0x84, - 0xbb, 0x60, 0x27, 0x92, 0xb4, 0x8b, 0x87, 0x38, 0xc6, 0xdb, 0x8f, 0x81, 0xf0, 0x2d, 0xed, 0xc4, - 0xd2, 0xde, 0x84, 0x63, 0xcc, 0x49, 0xd6, 0x89, 0xb6, 0x8d, 0x39, 0xda, 0x7d, 0x32, 0x3c, 0x23, - 0xee, 0x75, 0xfc, 0xcb, 0xa6, 0xb5, 0xc7, 0xc9, 0x23, 0x92, 0x63, 0x7c, 0xc7, 0xb3, 0xff, 0xbc, - 0x0f, 0x1e, 0x5e, 0xa9, 0xe6, 0xc7, 0x69, 0x7a, 0x09, 0x46, 0xb8, 0xb8, 0x48, 0xa7, 0x86, 0xd3, - 0x10, 0xf5, 0x2a, 0x0d, 0xe8, 0x86, 0x86, 0xc3, 0x06, 0x25, 0x3a, 0x0d, 0x45, 0xf7, 0x6d, 0x2f, - 0xf9, 0x74, 0x6f, 0xe5, 0x8d, 0x75, 0x4c, 0xe1, 0x14, 0x4d, 0x25, 0x4f, 0xbe, 0xa5, 0x2b, 0xb4, - 0x92, 0x3e, 0x5f, 0x83, 0x31, 0x37, 0xac, 0x85, 0xee, 0x8a, 0x47, 0xd7, 0xa9, 0xb6, 0xd2, 0x95, - 0xce, 0x81, 0x36, 0x5a, 0x61, 0x71, 0x82, 0x5a, 0x3b, 0x5f, 0xfa, 0x7b, 0x96, 0x5e, 0xbb, 0x46, - 0x89, 0xa0, 0xdb, 0x7f, 0x8b, 0x7d, 0x5d, 0xc8, 0x54, 0xf0, 0x62, 0xfb, 0xe7, 0x1f, 0x1c, 0x62, - 0x89, 0xa3, 0x17, 0xba, 0xda, 
0xb6, 0xd3, 0x9a, 0x6b, 0x47, 0xdb, 0x8b, 0x6e, 0x58, 0xf3, 0x77, - 0x49, 0xb0, 0xc7, 0xee, 0xe2, 0x43, 0xf1, 0x85, 0x4e, 0x21, 0x16, 0x2e, 0xcf, 0x55, 0x28, 0x25, - 0x4e, 0x97, 0x41, 0x73, 0x30, 0x2e, 0x81, 0x55, 0x12, 0xb2, 0x23, 0x60, 0x98, 0xb1, 0x51, 0x8f, - 0xe9, 0x04, 0x58, 0x31, 0x49, 0xd2, 0x9b, 0x02, 0x2e, 0x1c, 0x86, 0x80, 0xfb, 0x41, 0x18, 0x75, - 0x3d, 0x37, 0x72, 0x9d, 0xc8, 0xe7, 0xf6, 0x23, 0x7e, 0xed, 0x66, 0x0a, 0xe6, 0x15, 0x1d, 0x81, - 0x4d, 0x3a, 0xfb, 0xdf, 0xf7, 0xc1, 0x24, 0x1b, 0xb6, 0xf7, 0x66, 0xd8, 0x77, 0xd2, 0x0c, 0xbb, - 0x96, 0x9e, 0x61, 0x87, 0x21, 0xb9, 0xdf, 0xf7, 0x34, 0xfb, 0x34, 0x94, 0xd4, 0xfb, 0x41, 0xf9, - 0x80, 0xd8, 0xca, 0x79, 0x40, 0xdc, 0xfd, 0xf4, 0x96, 0x2e, 0x69, 0xc5, 0x4c, 0x97, 0xb4, 0xaf, - 0x58, 0x10, 0x1b, 0x16, 0xd0, 0x1b, 0x50, 0x6a, 0xf9, 0xcc, 0xc3, 0x35, 0x90, 0x6e, 0xe3, 0x8f, - 0x75, 0xb4, 0x4c, 0xf0, 0x50, 0x75, 0x01, 0xef, 0x85, 0x8a, 0x2c, 0x8a, 0x63, 0x2e, 0xe8, 0x0a, - 0x0c, 0xb6, 0x02, 0x52, 0x8d, 0x58, 0x1c, 0xa5, 0xde, 0x19, 0xf2, 0x59, 0xc3, 0x0b, 0x62, 0xc9, - 0xc1, 0xfe, 0x85, 0x02, 0x4c, 0x24, 0x49, 0xd1, 0xab, 0xd0, 0x47, 0x6e, 0x93, 0x9a, 0x68, 0x6f, - 0xe6, 0x51, 0x1c, 0xab, 0x26, 0x78, 0x07, 0xd0, 0xff, 0x98, 0x95, 0x42, 0x97, 0x61, 0x90, 0x9e, - 0xc3, 0x97, 0x54, 0xcc, 0xc0, 0x47, 0xf2, 0xce, 0x72, 0x25, 0xd0, 0xf0, 0xc6, 0x09, 0x10, 0x96, - 0xc5, 0x99, 0x1f, 0x58, 0xad, 0x55, 0xa5, 0x57, 0x9c, 0xa8, 0xd3, 0x4d, 0x7c, 0x63, 0xa1, 0xc2, - 0x89, 0x04, 0x37, 0xee, 0x07, 0x26, 0x81, 0x38, 0x66, 0x82, 0x3e, 0x02, 0xfd, 0x61, 0x83, 0x90, - 0x96, 0x30, 0xf4, 0x67, 0x2a, 0x17, 0xab, 0x94, 0x40, 0x70, 0x62, 0xca, 0x08, 0x06, 0xc0, 0xbc, - 0xa0, 0xfd, 0x8b, 0x16, 0x00, 0x77, 0x9c, 0x73, 0xbc, 0x2d, 0x72, 0x04, 0xfa, 0xf8, 0x45, 0xe8, - 0x0b, 0x5b, 0xa4, 0xd6, 0xc9, 0x7d, 0x3b, 0x6e, 0x4f, 0xb5, 0x45, 0x6a, 0xf1, 0x9c, 0xa5, 0xff, - 0x30, 0x2b, 0x6d, 0x7f, 0x3f, 0xc0, 0x58, 0x4c, 0xb6, 0x12, 0x91, 0x26, 0x7a, 0xd6, 0x08, 0x5b, - 0x72, 0x32, 0x11, 0xb6, 0xa4, 0xc4, 0xa8, 0x35, 0xd5, 0xef, 0xa7, 0xa1, 0xd8, 0x74, 0x6e, 0x0b, - 0xdd, 0xde, 0xd3, 0x9d, 0x9b, 0x41, 0xf9, 0xcf, 0xae, 0x39, 0xb7, 0xf9, 0xf5, 0xf7, 0x69, 0xb9, - 0xc6, 0xd6, 0x9c, 0xdb, 0x5d, 0x5d, 0x8c, 0x69, 0x25, 0xac, 0x2e, 0xd7, 0x13, 0x3e, 0x61, 0x3d, - 0xd5, 0xe5, 0x7a, 0xc9, 0xba, 0x5c, 0xaf, 0x87, 0xba, 0x5c, 0x0f, 0xdd, 0x81, 0x41, 0xe1, 0xb2, - 0x29, 0x22, 0xc0, 0x5d, 0xe8, 0xa1, 0x3e, 0xe1, 0xf1, 0xc9, 0xeb, 0xbc, 0x20, 0xaf, 0xf7, 0x02, - 0xda, 0xb5, 0x5e, 0x59, 0x21, 0xfa, 0x7f, 0x2c, 0x18, 0x13, 0xbf, 0x31, 0x79, 0xbb, 0x4d, 0xc2, - 0x48, 0x88, 0xbf, 0x1f, 0xe8, 0xbd, 0x0d, 0xa2, 0x20, 0x6f, 0xca, 0x07, 0xe4, 0x49, 0x65, 0x22, - 0xbb, 0xb6, 0x28, 0xd1, 0x0a, 0xf4, 0x0b, 0x16, 0x1c, 0x6b, 0x3a, 0xb7, 0x79, 0x8d, 0x1c, 0x86, - 0x9d, 0xc8, 0xf5, 0x85, 0xeb, 0xc3, 0xab, 0xbd, 0x0d, 0x7f, 0xaa, 0x38, 0x6f, 0xa4, 0xb4, 0x73, - 0x1e, 0xcb, 0x22, 0xe9, 0xda, 0xd4, 0xcc, 0x76, 0xcd, 0x6c, 0xc2, 0x90, 0x9c, 0x6f, 0x0f, 0xd2, - 0x3f, 0x9c, 0xd5, 0x23, 0xe6, 0xda, 0x03, 0xad, 0xe7, 0xd3, 0x30, 0xa2, 0xcf, 0xb1, 0x07, 0x5a, - 0xd7, 0xdb, 0x30, 0x95, 0x31, 0x97, 0x1e, 0x68, 0x95, 0xb7, 0xe0, 0x64, 0xee, 0xfc, 0x78, 0xa0, - 0xfe, 0xfd, 0x5f, 0xb3, 0xf4, 0x7d, 0xf0, 0x08, 0x8c, 0x22, 0x0b, 0xa6, 0x51, 0xe4, 0x4c, 0xe7, - 0x95, 0x93, 0x63, 0x19, 0x79, 0x4b, 0x6f, 0x34, 0xdd, 0xd5, 0xd1, 0xeb, 0x30, 0xd0, 0xa0, 0x10, - 0xe9, 0xf8, 0x6b, 0x77, 0x5f, 0x91, 0xb1, 0x38, 0xca, 0xe0, 0x21, 0x16, 0x1c, 0xec, 0x5f, 0xb1, - 0xa0, 0xef, 0x08, 0x7a, 0x02, 0x9b, 0x3d, 0xf1, 0x6c, 0x2e, 0x6b, 0x11, 0x0c, 0x7f, 0x16, 0x3b, - 0xb7, 0x96, 0x6e, 0x47, 0xc4, 0x0b, 0xd9, 0x99, 0x9e, 
0xd9, 0x31, 0xfb, 0x16, 0x4c, 0xad, 0xfa, - 0x4e, 0x7d, 0xde, 0x69, 0x38, 0x5e, 0x8d, 0x04, 0x2b, 0xde, 0xd6, 0x81, 0xbc, 0xd6, 0x0b, 0x5d, - 0xbd, 0xd6, 0x5f, 0x82, 0x01, 0xb7, 0xa5, 0x05, 0xf7, 0x3e, 0x4b, 0x3b, 0x70, 0xa5, 0x22, 0xe2, - 0x7a, 0x23, 0xa3, 0x72, 0x06, 0xc5, 0x82, 0x9e, 0x8e, 0x3c, 0x77, 0x17, 0xeb, 0xcb, 0x1f, 0x79, - 0x2a, 0xc5, 0x27, 0x43, 0x40, 0x19, 0x8e, 0xcd, 0xdb, 0x60, 0x54, 0x21, 0x5e, 0x7d, 0x61, 0x18, - 0x74, 0xf9, 0x97, 0x8a, 0xe1, 0x7f, 0x22, 0x5b, 0xba, 0x4e, 0x75, 0x8c, 0xf6, 0x9e, 0x89, 0x03, - 0xb0, 0x64, 0x64, 0xbf, 0x04, 0x99, 0x21, 0x3b, 0xba, 0x6b, 0x4e, 0xec, 0x8f, 0xc1, 0x24, 0x2b, - 0x79, 0x40, 0xad, 0x84, 0x9d, 0xd0, 0xf7, 0x66, 0xc4, 0x69, 0xb5, 0xff, 0x8d, 0x05, 0x68, 0xcd, - 0xaf, 0xbb, 0x9b, 0x7b, 0x82, 0x39, 0xff, 0xfe, 0xb7, 0xa1, 0xcc, 0xaf, 0x7d, 0xc9, 0x58, 0xa6, - 0x0b, 0x0d, 0x27, 0x0c, 0x35, 0x5d, 0xf3, 0x13, 0xa2, 0xde, 0xf2, 0x46, 0x67, 0x72, 0xdc, 0x8d, - 0x1f, 0x7a, 0x23, 0x11, 0xa8, 0xed, 0x43, 0xa9, 0x40, 0x6d, 0x4f, 0x64, 0x7a, 0x7c, 0xa4, 0x5b, - 0x2f, 0x03, 0xb8, 0xd9, 0x5f, 0xb0, 0x60, 0x7c, 0x3d, 0x11, 0x9b, 0xf3, 0x1c, 0x33, 0x7f, 0x67, - 0xd8, 0x50, 0xaa, 0x0c, 0x8a, 0x05, 0xf6, 0xd0, 0x75, 0x8c, 0x7f, 0x63, 0x41, 0x1c, 0x22, 0xe8, - 0x08, 0xa4, 0xda, 0x05, 0x43, 0xaa, 0xcd, 0xbc, 0x21, 0xa8, 0xe6, 0xe4, 0x09, 0xb5, 0xe8, 0x8a, - 0x1a, 0x93, 0x0e, 0x97, 0x83, 0x98, 0x0d, 0x5f, 0x67, 0x63, 0xe6, 0xc0, 0xa9, 0xd1, 0xf8, 0xc3, - 0x02, 0x20, 0x45, 0xdb, 0x73, 0x70, 0xbf, 0x74, 0x89, 0xc3, 0x09, 0xee, 0xb7, 0x0b, 0x88, 0x39, - 0x70, 0x04, 0x8e, 0x17, 0x72, 0xb6, 0xae, 0xd0, 0xaa, 0x1e, 0xcc, 0x3b, 0x64, 0x46, 0xbe, 0xf6, - 0x5b, 0x4d, 0x71, 0xc3, 0x19, 0x35, 0x68, 0x8e, 0x39, 0xfd, 0xbd, 0x3a, 0xe6, 0x0c, 0x74, 0x79, - 0xb6, 0xfa, 0x55, 0x0b, 0x46, 0x55, 0x37, 0xbd, 0x4b, 0x1e, 0x37, 0xa8, 0xf6, 0xe4, 0x9c, 0x2b, - 0x15, 0xad, 0xc9, 0xec, 0xbc, 0xfd, 0x2e, 0xf6, 0xfc, 0xd8, 0x69, 0xb8, 0x77, 0x88, 0x8a, 0x9a, - 0x5b, 0x16, 0xcf, 0x89, 0x05, 0xf4, 0xde, 0x7e, 0x79, 0x54, 0xfd, 0xe3, 0x51, 0x2f, 0xe3, 0x22, - 0xf6, 0x4f, 0xd1, 0xc5, 0x6e, 0x4e, 0x45, 0xf4, 0x22, 0xf4, 0xb7, 0xb6, 0x9d, 0x90, 0x24, 0x1e, - 0x81, 0xf5, 0x57, 0x28, 0xf0, 0xde, 0x7e, 0x79, 0x4c, 0x15, 0x60, 0x10, 0xcc, 0xa9, 0x7b, 0x0f, - 0x99, 0x98, 0x9e, 0x9c, 0x5d, 0x43, 0x26, 0xfe, 0x95, 0x05, 0x7d, 0xeb, 0xf4, 0xf4, 0x7a, 0xf0, - 0x5b, 0xc0, 0x6b, 0xc6, 0x16, 0x70, 0x2a, 0x2f, 0x61, 0x4b, 0xee, 0xea, 0x5f, 0x4e, 0xac, 0xfe, - 0x33, 0xb9, 0x1c, 0x3a, 0x2f, 0xfc, 0x26, 0x0c, 0xb3, 0x34, 0x30, 0xe2, 0xc1, 0xdb, 0xf3, 0xc6, - 0x82, 0x2f, 0x27, 0x16, 0xfc, 0xb8, 0x46, 0xaa, 0xad, 0xf4, 0x27, 0x61, 0x50, 0xbc, 0xa0, 0x4a, - 0xbe, 0xe2, 0x16, 0xb4, 0x58, 0xe2, 0xed, 0x1f, 0x2f, 0x82, 0x91, 0x76, 0x06, 0xfd, 0x9a, 0x05, - 0xb3, 0x01, 0xf7, 0xac, 0xae, 0x2f, 0xb6, 0x03, 0xd7, 0xdb, 0xaa, 0xd6, 0xb6, 0x49, 0xbd, 0xdd, - 0x70, 0xbd, 0xad, 0x95, 0x2d, 0xcf, 0x57, 0xe0, 0xa5, 0xdb, 0xa4, 0xd6, 0x66, 0x56, 0xcf, 0x2e, - 0x39, 0x6e, 0xd4, 0x0b, 0x85, 0xe7, 0xee, 0xee, 0x97, 0x67, 0xf1, 0x81, 0x78, 0xe3, 0x03, 0xb6, - 0x05, 0xfd, 0x9e, 0x05, 0x17, 0x78, 0x36, 0x96, 0xde, 0xdb, 0xdf, 0x41, 0x89, 0x50, 0x91, 0xac, - 0x62, 0x26, 0x1b, 0x24, 0x68, 0xce, 0x7f, 0x50, 0x74, 0xe8, 0x85, 0xca, 0xc1, 0xea, 0xc2, 0x07, - 0x6d, 0x9c, 0xfd, 0x8f, 0x8a, 0x30, 0x2a, 0x42, 0xeb, 0x89, 0x33, 0xe0, 0x45, 0x63, 0x4a, 0x3c, - 0x92, 0x98, 0x12, 0x93, 0x06, 0xf1, 0xe1, 0x6c, 0xff, 0x21, 0x4c, 0xd2, 0xcd, 0xf9, 0x32, 0x71, - 0x82, 0xe8, 0x26, 0x71, 0xb8, 0xbf, 0x5d, 0xf1, 0xc0, 0xbb, 0xbf, 0x52, 0xfc, 0xae, 0x26, 0x99, - 0xe1, 0x34, 0xff, 0xef, 0xa4, 0x33, 0xc7, 0x83, 0x89, 0x54, 0x74, 0xc4, 0x37, 
0xa1, 0xa4, 0x9e, - 0xff, 0x88, 0x4d, 0xa7, 0x73, 0x90, 0xd1, 0x24, 0x07, 0xae, 0x57, 0x8c, 0x9f, 0x9e, 0xc5, 0xec, - 0xec, 0xbf, 0x57, 0x30, 0x2a, 0xe4, 0x83, 0xb8, 0x0e, 0x43, 0x4e, 0xc8, 0x02, 0x1f, 0xd7, 0x3b, - 0xa9, 0x7e, 0x53, 0xd5, 0xb0, 0x27, 0x58, 0x73, 0xa2, 0x24, 0x56, 0x3c, 0xd0, 0x65, 0xee, 0xd5, - 0xb8, 0x4b, 0x3a, 0xe9, 0x7d, 0x53, 0xdc, 0x40, 0xfa, 0x3d, 0xee, 0x12, 0x2c, 0xca, 0xa3, 0x4f, - 0x70, 0xb7, 0xd3, 0x2b, 0x9e, 0x7f, 0xcb, 0xbb, 0xe4, 0xfb, 0x32, 0x8c, 0x4a, 0x6f, 0x0c, 0x27, - 0xa5, 0xb3, 0xa9, 0x2a, 0x8e, 0x4d, 0x6e, 0xbd, 0x85, 0x1b, 0xfe, 0x0c, 0xb0, 0xec, 0x13, 0xe6, - 0x6b, 0xfb, 0x10, 0x11, 0x18, 0x17, 0x71, 0x1b, 0x25, 0x4c, 0xf4, 0x5d, 0xe6, 0x0d, 0xd7, 0x2c, - 0x1d, 0x5b, 0x28, 0xae, 0x98, 0x2c, 0x70, 0x92, 0xa7, 0xfd, 0x33, 0x16, 0xb0, 0x97, 0xc7, 0x47, - 0x20, 0x8f, 0x7c, 0xd8, 0x94, 0x47, 0xa6, 0xf3, 0x3a, 0x39, 0x47, 0x14, 0x79, 0x81, 0xcf, 0xac, - 0x4a, 0xe0, 0xdf, 0xde, 0x13, 0xbe, 0x42, 0xdd, 0x2f, 0x57, 0xf6, 0x7f, 0xb7, 0xf8, 0x26, 0x16, - 0xc7, 0x69, 0xf8, 0x2c, 0x0c, 0xd5, 0x9c, 0x96, 0x53, 0xe3, 0x39, 0xd2, 0x72, 0x15, 0x9d, 0x46, - 0xa1, 0xd9, 0x05, 0x51, 0x82, 0x2b, 0xee, 0x64, 0xfc, 0xcf, 0x21, 0x09, 0xee, 0xaa, 0xac, 0x53, - 0x55, 0xce, 0xec, 0xc0, 0xa8, 0xc1, 0xec, 0x81, 0x6a, 0x79, 0x3e, 0xcb, 0x8f, 0x58, 0x15, 0xaf, - 0xb6, 0x09, 0x93, 0x9e, 0xf6, 0x9f, 0x1e, 0x28, 0xf2, 0xe6, 0xfc, 0x58, 0xb7, 0x43, 0x94, 0x9d, - 0x3e, 0xda, 0xa3, 0xe6, 0x04, 0x1b, 0x9c, 0xe6, 0x6c, 0xff, 0x84, 0x05, 0x0f, 0xe9, 0x84, 0xda, - 0xbb, 0xa9, 0x6e, 0xd6, 0xa7, 0x45, 0x18, 0xf2, 0x5b, 0x24, 0x70, 0x22, 0x3f, 0x10, 0xa7, 0xc6, - 0x79, 0xd9, 0xe9, 0x57, 0x05, 0xfc, 0x9e, 0xc8, 0xf8, 0x21, 0xb9, 0x4b, 0x38, 0x56, 0x25, 0xe9, - 0xd5, 0x9a, 0x75, 0x46, 0x28, 0x5e, 0xc8, 0xb1, 0x3d, 0x80, 0x39, 0x32, 0x84, 0x58, 0x60, 0xec, - 0x3f, 0xb7, 0xf8, 0xc4, 0xd2, 0x9b, 0x8e, 0xde, 0x86, 0x89, 0xa6, 0x13, 0xd5, 0xb6, 0x97, 0x6e, - 0xb7, 0x02, 0x6e, 0xcb, 0x93, 0xfd, 0xf4, 0x74, 0xb7, 0x7e, 0xd2, 0x3e, 0x32, 0xf6, 0xa4, 0x5d, - 0x4b, 0x30, 0xc3, 0x29, 0xf6, 0xe8, 0x26, 0x0c, 0x33, 0x18, 0x7b, 0xfc, 0x19, 0x76, 0x12, 0x0d, - 0xf2, 0x6a, 0x53, 0xbe, 0x20, 0x6b, 0x31, 0x1f, 0xac, 0x33, 0xb5, 0xbf, 0x52, 0xe4, 0xab, 0x9d, - 0x89, 0xf2, 0x4f, 0xc2, 0x60, 0xcb, 0xaf, 0x2f, 0xac, 0x2c, 0x62, 0x31, 0x0a, 0xea, 0x18, 0xa9, - 0x70, 0x30, 0x96, 0x78, 0x74, 0x1e, 0x86, 0xc4, 0x4f, 0x69, 0x7b, 0x65, 0x7b, 0xb3, 0xa0, 0x0b, - 0xb1, 0xc2, 0xa2, 0xe7, 0x00, 0x5a, 0x81, 0xbf, 0xeb, 0xd6, 0x59, 0x30, 0x98, 0xa2, 0xe9, 0xc6, - 0x55, 0x51, 0x18, 0xac, 0x51, 0xa1, 0x57, 0x60, 0xb4, 0xed, 0x85, 0x5c, 0x1c, 0xd1, 0x42, 0x6e, - 0x2b, 0x07, 0xa3, 0x6b, 0x3a, 0x12, 0x9b, 0xb4, 0x68, 0x0e, 0x06, 0x22, 0x87, 0xb9, 0x25, 0xf5, - 0xe7, 0x7b, 0x5b, 0x6f, 0x50, 0x0a, 0x3d, 0x1d, 0x17, 0x2d, 0x80, 0x45, 0x41, 0xf4, 0xa6, 0x7c, - 0x87, 0xcd, 0x37, 0x76, 0xf1, 0xcc, 0xa1, 0xb7, 0x43, 0x40, 0x7b, 0x85, 0x2d, 0x9e, 0x4f, 0x18, - 0xbc, 0xd0, 0xcb, 0x00, 0xe4, 0x76, 0x44, 0x02, 0xcf, 0x69, 0x28, 0x67, 0x42, 0x25, 0x17, 0x2c, - 0xfa, 0xeb, 0x7e, 0x74, 0x2d, 0x24, 0x4b, 0x8a, 0x02, 0x6b, 0xd4, 0xf6, 0xef, 0x95, 0x00, 0x62, - 0xb9, 0x1d, 0xdd, 0x49, 0x6d, 0x5c, 0xcf, 0x74, 0x96, 0xf4, 0x0f, 0x6f, 0xd7, 0x42, 0x9f, 0xb7, - 0x60, 0x58, 0xc4, 0xbc, 0x61, 0x23, 0x54, 0xe8, 0xbc, 0x71, 0x9a, 0xa1, 0x77, 0x68, 0x09, 0xde, - 0x84, 0xe7, 0xe5, 0x0c, 0xd5, 0x30, 0x5d, 0x5b, 0xa1, 0x57, 0x8c, 0xde, 0x2f, 0xaf, 0x8a, 0x45, - 0xa3, 0x2b, 0xd5, 0x55, 0xb1, 0xc4, 0xce, 0x08, 0xfd, 0x96, 0x78, 0xcd, 0xb8, 0x25, 0xf6, 0xe5, - 0x3f, 0x34, 0x35, 0xc4, 0xd7, 0x6e, 0x17, 0x44, 0x54, 0xd1, 0x83, 0x4e, 0xf4, 0xe7, 0xbf, 0x8e, - 0xd4, 
0xee, 0x49, 0x5d, 0x02, 0x4e, 0x7c, 0x1a, 0xc6, 0xeb, 0xa6, 0x10, 0x20, 0x66, 0xe2, 0x13, - 0x79, 0x7c, 0x13, 0x32, 0x43, 0x7c, 0xec, 0x27, 0x10, 0x38, 0xc9, 0x18, 0x55, 0x78, 0x0c, 0x92, - 0x15, 0x6f, 0xd3, 0x17, 0x4f, 0x6d, 0xec, 0xdc, 0xb1, 0xdc, 0x0b, 0x23, 0xd2, 0xa4, 0x94, 0xf1, - 0xe9, 0xbe, 0x2e, 0xca, 0x62, 0xc5, 0x05, 0xbd, 0x0e, 0x03, 0xec, 0x79, 0x5c, 0x38, 0x3d, 0x94, - 0xaf, 0x88, 0x37, 0x83, 0x31, 0xc6, 0x0b, 0x92, 0xfd, 0x0d, 0xb1, 0xe0, 0x80, 0x2e, 0xcb, 0xc7, - 0xa7, 0xe1, 0x8a, 0x77, 0x2d, 0x24, 0xec, 0xf1, 0x69, 0x69, 0xfe, 0xb1, 0xf8, 0x5d, 0x29, 0x87, - 0x67, 0x26, 0xed, 0x34, 0x4a, 0x52, 0x29, 0x4a, 0xfc, 0x97, 0xb9, 0x40, 0x45, 0xe8, 0xa8, 0xcc, - 0xe6, 0x99, 0xf9, 0x42, 0xe3, 0xee, 0xbc, 0x6e, 0xb2, 0xc0, 0x49, 0x9e, 0x54, 0x22, 0xe5, 0xab, - 0x5e, 0x3c, 0xd6, 0xe9, 0xb6, 0x77, 0xf0, 0x8b, 0x38, 0x3b, 0x8d, 0x38, 0x04, 0x8b, 0xf2, 0x47, - 0x2a, 0x1e, 0xcc, 0x78, 0x30, 0x91, 0x5c, 0xa2, 0x0f, 0x54, 0x1c, 0xf9, 0xd3, 0x3e, 0x18, 0x33, - 0xa7, 0x14, 0xba, 0x00, 0x25, 0xc1, 0x44, 0xe5, 0xd3, 0x51, 0xab, 0x64, 0x4d, 0x22, 0x70, 0x4c, - 0xc3, 0xd2, 0x28, 0xb1, 0xe2, 0x9a, 0x77, 0x76, 0x9c, 0x46, 0x49, 0x61, 0xb0, 0x46, 0x45, 0x2f, - 0x56, 0x37, 0x7d, 0x3f, 0x52, 0x07, 0x92, 0x9a, 0x77, 0xf3, 0x0c, 0x8a, 0x05, 0x96, 0x1e, 0x44, - 0x3b, 0x24, 0xf0, 0x48, 0xc3, 0x0c, 0xcf, 0xae, 0x0e, 0xa2, 0x2b, 0x3a, 0x12, 0x9b, 0xb4, 0xf4, - 0x38, 0xf5, 0x43, 0x36, 0x91, 0xc5, 0xf5, 0x2d, 0xf6, 0x76, 0xaf, 0xf2, 0x77, 0xfb, 0x12, 0x8f, - 0x3e, 0x06, 0x0f, 0xa9, 0x50, 0x68, 0x98, 0x1b, 0x79, 0x64, 0x8d, 0x03, 0x86, 0xb6, 0xe5, 0xa1, - 0x85, 0x6c, 0x32, 0x9c, 0x57, 0x1e, 0xbd, 0x06, 0x63, 0x42, 0xc4, 0x97, 0x1c, 0x07, 0x4d, 0xd7, - 0xad, 0x2b, 0x06, 0x16, 0x27, 0xa8, 0x65, 0x80, 0x79, 0x26, 0x65, 0x4b, 0x0e, 0x43, 0xe9, 0x00, - 0xf3, 0x3a, 0x1e, 0xa7, 0x4a, 0xa0, 0x39, 0x18, 0xe7, 0x32, 0x98, 0xeb, 0x6d, 0xf1, 0x31, 0x11, - 0x6f, 0xe9, 0xd4, 0x92, 0xba, 0x6a, 0xa2, 0x71, 0x92, 0x1e, 0xbd, 0x04, 0x23, 0x4e, 0x50, 0xdb, - 0x76, 0x23, 0x52, 0x8b, 0xda, 0x01, 0x7f, 0x64, 0xa7, 0xf9, 0xbe, 0xcd, 0x69, 0x38, 0x6c, 0x50, - 0xda, 0x77, 0x60, 0x2a, 0x23, 0xa0, 0x07, 0x9d, 0x38, 0x4e, 0xcb, 0x95, 0xdf, 0x94, 0x70, 0x30, - 0x9f, 0xab, 0xac, 0xc8, 0xaf, 0xd1, 0xa8, 0xe8, 0xec, 0x64, 0x81, 0x3f, 0xb4, 0xd4, 0xbf, 0x6a, - 0x76, 0x2e, 0x4b, 0x04, 0x8e, 0x69, 0xec, 0xff, 0x54, 0x80, 0xf1, 0x0c, 0xc3, 0x11, 0x4b, 0x3f, - 0x9b, 0xb8, 0xa4, 0xc4, 0xd9, 0x66, 0xcd, 0x7c, 0x05, 0x85, 0x03, 0xe4, 0x2b, 0x28, 0x76, 0xcb, - 0x57, 0xd0, 0xf7, 0x4e, 0xf2, 0x15, 0x98, 0x3d, 0xd6, 0xdf, 0x53, 0x8f, 0x65, 0xe4, 0x38, 0x18, - 0x38, 0x60, 0x8e, 0x03, 0xa3, 0xd3, 0x07, 0x7b, 0xe8, 0xf4, 0x1f, 0x29, 0xc0, 0x44, 0xd2, 0xe6, - 0x74, 0x04, 0x7a, 0xdb, 0xd7, 0x0d, 0xbd, 0xed, 0xf9, 0x5e, 0xde, 0x3e, 0xe7, 0xea, 0x70, 0x71, - 0x42, 0x87, 0xfb, 0x54, 0x4f, 0xdc, 0x3a, 0xeb, 0x73, 0x7f, 0xb2, 0x00, 0xc7, 0x33, 0x4d, 0x71, - 0x47, 0xd0, 0x37, 0x57, 0x8d, 0xbe, 0x79, 0xb6, 0xe7, 0x77, 0xe1, 0xb9, 0x1d, 0x74, 0x23, 0xd1, - 0x41, 0x17, 0x7a, 0x67, 0xd9, 0xb9, 0x97, 0xbe, 0x51, 0x84, 0x33, 0x99, 0xe5, 0x62, 0xb5, 0xe7, - 0xb2, 0xa1, 0xf6, 0x7c, 0x2e, 0xa1, 0xf6, 0xb4, 0x3b, 0x97, 0x3e, 0x1c, 0x3d, 0xa8, 0x78, 0x1f, - 0xcd, 0xa2, 0x3c, 0xdc, 0xa7, 0x0e, 0xd4, 0x78, 0x1f, 0xad, 0x18, 0x61, 0x93, 0xef, 0x77, 0x92, - 0xee, 0xf3, 0x77, 0x2c, 0x38, 0x99, 0x39, 0x36, 0x47, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, 0x4f, - 0xf6, 0x3c, 0x5b, 0x73, 0x94, 0x5f, 0x5f, 0x18, 0xc8, 0xf9, 0x16, 0x76, 0x93, 0xbf, 0x0a, 0xc3, - 0x4e, 0xad, 0x46, 0xc2, 0x70, 0xcd, 0xaf, 0xab, 0xd0, 0xe6, 0xcf, 0xb2, 0x7b, 0x56, 0x0c, 0xbe, - 0xb7, 0x5f, 0x9e, 0x49, 0xb2, 
0x88, 0xd1, 0x58, 0xe7, 0x80, 0x3e, 0x01, 0x43, 0xa1, 0xcc, 0x4a, - 0xd7, 0x77, 0xff, 0x59, 0xe9, 0x98, 0x92, 0x40, 0x69, 0x2a, 0x14, 0x4b, 0xf4, 0xbf, 0xe9, 0xf1, - 0x76, 0xd2, 0x52, 0x65, 0x22, 0xfa, 0xcb, 0x7d, 0x44, 0xdd, 0x79, 0x0e, 0x60, 0x57, 0x5d, 0x09, - 0x92, 0x5a, 0x08, 0xed, 0xb2, 0xa0, 0x51, 0xa1, 0x8f, 0xc0, 0x44, 0xc8, 0x43, 0x4d, 0xc6, 0xce, - 0x13, 0x7c, 0x2e, 0xb2, 0x68, 0x5d, 0xd5, 0x04, 0x0e, 0xa7, 0xa8, 0xd1, 0xb2, 0xac, 0x95, 0xb9, - 0xc9, 0xf0, 0xe9, 0x79, 0x2e, 0xae, 0x51, 0xb8, 0xca, 0x1c, 0x4b, 0x0e, 0x02, 0xeb, 0x7e, 0xad, - 0x24, 0xfa, 0x04, 0x00, 0x9d, 0x44, 0x42, 0x1b, 0x31, 0x98, 0xbf, 0x85, 0xd2, 0xbd, 0xa5, 0x9e, - 0xe9, 0x3b, 0xce, 0x1e, 0x36, 0x2f, 0x2a, 0x26, 0x58, 0x63, 0x88, 0x1c, 0x18, 0x8d, 0xff, 0xc5, - 0x19, 0xa2, 0xcf, 0xe7, 0xd6, 0x90, 0x64, 0xce, 0x14, 0xdf, 0x8b, 0x3a, 0x0b, 0x6c, 0x72, 0x44, - 0x1f, 0x87, 0x93, 0xbb, 0xb9, 0x1e, 0x29, 0xa5, 0x38, 0xe9, 0x63, 0xbe, 0x1f, 0x4a, 0x7e, 0x79, - 0xfb, 0x77, 0x01, 0x1e, 0xee, 0xb0, 0xd3, 0xa3, 0x39, 0xd3, 0x9a, 0xfc, 0x74, 0x52, 0x45, 0x30, - 0x93, 0x59, 0xd8, 0xd0, 0x19, 0x24, 0x16, 0x54, 0xe1, 0x1d, 0x2f, 0xa8, 0x1f, 0xb2, 0x34, 0xe5, - 0x0d, 0x77, 0xe7, 0xfd, 0xf0, 0x01, 0x4f, 0xb0, 0x43, 0xd4, 0xe6, 0x6c, 0x66, 0xa8, 0x44, 0x9e, - 0xeb, 0xb9, 0x39, 0xbd, 0xeb, 0x48, 0xbe, 0x96, 0x1d, 0xbc, 0x99, 0x6b, 0x4b, 0x2e, 0x1d, 0xf4, - 0xfb, 0x8f, 0x2a, 0x90, 0xf3, 0x1f, 0x5a, 0x70, 0x32, 0x05, 0xe6, 0x6d, 0x20, 0xa1, 0x88, 0x2f, - 0xb6, 0xfe, 0x8e, 0x1b, 0x2f, 0x19, 0xf2, 0x6f, 0xb8, 0x2c, 0xbe, 0xe1, 0x64, 0x2e, 0x5d, 0xb2, - 0xe9, 0x5f, 0xfc, 0x93, 0xf2, 0x14, 0xab, 0xc0, 0x24, 0xc4, 0xf9, 0x4d, 0x47, 0x2d, 0x38, 0x5b, - 0x6b, 0x07, 0x41, 0x3c, 0x59, 0x33, 0x16, 0x27, 0xbf, 0xeb, 0x3d, 0x76, 0x77, 0xbf, 0x7c, 0x76, - 0xa1, 0x0b, 0x2d, 0xee, 0xca, 0x0d, 0x79, 0x80, 0x9a, 0x29, 0xbf, 0x2f, 0x91, 0x18, 0x3e, 0xd3, - 0x53, 0x23, 0xed, 0x25, 0xc6, 0x1f, 0xb0, 0x66, 0x78, 0x8f, 0x65, 0x70, 0x3e, 0x5a, 0xed, 0xc9, - 0xb7, 0x26, 0x32, 0xf7, 0xcc, 0x2a, 0x9c, 0xe9, 0x3c, 0x99, 0x0e, 0xf4, 0x78, 0xfe, 0x0f, 0x2c, - 0x38, 0xdd, 0x31, 0x42, 0xd3, 0xb7, 0xe1, 0x65, 0xc1, 0xfe, 0x9c, 0x05, 0x8f, 0x64, 0x96, 0x30, - 0x5c, 0x0c, 0x2f, 0x40, 0xa9, 0x96, 0x48, 0x6b, 0x1c, 0xc7, 0x2a, 0x51, 0x29, 0x8d, 0x63, 0x1a, - 0xc3, 0x93, 0xb0, 0xd0, 0xd5, 0x93, 0xf0, 0x37, 0x2d, 0x48, 0x1d, 0xf5, 0x47, 0x20, 0x79, 0xae, - 0x98, 0x92, 0xe7, 0x63, 0xbd, 0xf4, 0x66, 0x8e, 0xd0, 0xf9, 0x97, 0xe3, 0x70, 0x22, 0xe7, 0xed, - 0xeb, 0x2e, 0x4c, 0x6e, 0xd5, 0x88, 0x19, 0xec, 0xa0, 0x53, 0x10, 0xb0, 0x8e, 0x91, 0x11, 0x78, - 0x36, 0xe9, 0x14, 0x09, 0x4e, 0x57, 0x81, 0x3e, 0x67, 0xc1, 0x31, 0xe7, 0x56, 0xb8, 0x44, 0x6f, - 0x10, 0x6e, 0x6d, 0xbe, 0xe1, 0xd7, 0x76, 0xa8, 0x60, 0x26, 0x97, 0xd5, 0x0b, 0x99, 0x5a, 0xdd, - 0x1b, 0xd5, 0x14, 0xbd, 0x51, 0xfd, 0xf4, 0xdd, 0xfd, 0xf2, 0xb1, 0x2c, 0x2a, 0x9c, 0x59, 0x17, - 0xc2, 0x22, 0x7b, 0x8f, 0x13, 0x6d, 0x77, 0x0a, 0xc7, 0x91, 0xf5, 0x48, 0x99, 0x8b, 0xc4, 0x12, - 0x83, 0x15, 0x1f, 0xf4, 0x29, 0x28, 0x6d, 0xc9, 0x97, 0xf7, 0x19, 0x22, 0x77, 0xdc, 0x91, 0x9d, - 0xe3, 0x11, 0x70, 0xd7, 0x0c, 0x45, 0x84, 0x63, 0xa6, 0xe8, 0x35, 0x28, 0x7a, 0x9b, 0x61, 0xa7, - 0xf4, 0xfb, 0x09, 0x1f, 0x5c, 0x1e, 0xf4, 0x66, 0x7d, 0xb9, 0x8a, 0x69, 0x41, 0x74, 0x19, 0x8a, - 0xc1, 0xcd, 0xba, 0x30, 0x49, 0x64, 0x2e, 0x52, 0x3c, 0xbf, 0x98, 0xd3, 0x2a, 0xc6, 0x09, 0xcf, - 0x2f, 0x62, 0xca, 0x02, 0x55, 0xa0, 0x9f, 0x3d, 0x18, 0x15, 0xa2, 0x6d, 0xe6, 0x55, 0xbe, 0xc3, - 0xc3, 0x6b, 0xfe, 0x18, 0x8d, 0x11, 0x60, 0xce, 0x08, 0x6d, 0xc0, 0x40, 0x8d, 0xa5, 0x6a, 0x17, - 0xb2, 0xec, 0xfb, 0x33, 0x8d, 0x0f, 0x1d, 0x72, 0xd8, 
0x0b, 0x5d, 0x3c, 0xa3, 0xc0, 0x82, 0x17, - 0xe3, 0x4a, 0x5a, 0xdb, 0x9b, 0xf2, 0xc4, 0xca, 0xe6, 0x4a, 0x5a, 0xdb, 0xcb, 0xd5, 0x8e, 0x5c, - 0x19, 0x05, 0x16, 0xbc, 0xd0, 0xcb, 0x50, 0xd8, 0xac, 0x89, 0xc7, 0xa0, 0x99, 0x56, 0x08, 0x33, - 0x6e, 0xd1, 0xfc, 0xc0, 0xdd, 0xfd, 0x72, 0x61, 0x79, 0x01, 0x17, 0x36, 0x6b, 0x68, 0x1d, 0x06, - 0x37, 0x79, 0xa4, 0x13, 0x61, 0x68, 0x78, 0x22, 0x3b, 0x08, 0x4b, 0x2a, 0x18, 0x0a, 0x7f, 0x58, - 0x28, 0x10, 0x58, 0x32, 0x61, 0xc9, 0x64, 0x54, 0xc4, 0x16, 0x11, 0x30, 0x72, 0xf6, 0x60, 0x51, - 0x76, 0xf8, 0x55, 0x23, 0x8e, 0xfb, 0x82, 0x35, 0x8e, 0x74, 0x56, 0x3b, 0x77, 0xda, 0x01, 0xcb, - 0x26, 0x20, 0x22, 0x8b, 0x65, 0xce, 0xea, 0x39, 0x49, 0xd4, 0x69, 0x56, 0x2b, 0x22, 0x1c, 0x33, - 0x45, 0x3b, 0x30, 0xba, 0x1b, 0xb6, 0xb6, 0x89, 0x5c, 0xd2, 0x2c, 0xd0, 0x58, 0x8e, 0x34, 0x7b, - 0x5d, 0x10, 0xba, 0x41, 0xd4, 0x76, 0x1a, 0xa9, 0x5d, 0x88, 0x5d, 0x6b, 0xae, 0xeb, 0xcc, 0xb0, - 0xc9, 0x9b, 0x76, 0xff, 0xdb, 0x6d, 0xff, 0xe6, 0x5e, 0x44, 0x44, 0x9c, 0xc7, 0xcc, 0xee, 0x7f, - 0x83, 0x93, 0xa4, 0xbb, 0x5f, 0x20, 0xb0, 0x64, 0x82, 0xae, 0x8b, 0xee, 0x61, 0xbb, 0xe7, 0x44, - 0x7e, 0x10, 0xe9, 0x39, 0x49, 0x94, 0xd3, 0x29, 0x6c, 0xb7, 0x8c, 0x59, 0xb1, 0x5d, 0xb2, 0xb5, - 0xed, 0x47, 0xbe, 0x97, 0xd8, 0xa1, 0x27, 0xf3, 0x77, 0xc9, 0x4a, 0x06, 0x7d, 0x7a, 0x97, 0xcc, - 0xa2, 0xc2, 0x99, 0x75, 0xa1, 0x3a, 0x8c, 0xb5, 0xfc, 0x20, 0xba, 0xe5, 0x07, 0x72, 0x7e, 0xa1, - 0x0e, 0x8a, 0x52, 0x83, 0x52, 0xd4, 0xc8, 0x42, 0xa8, 0x9a, 0x18, 0x9c, 0xe0, 0x89, 0x3e, 0x0a, - 0x83, 0x61, 0xcd, 0x69, 0x90, 0x95, 0xab, 0xd3, 0x53, 0xf9, 0xc7, 0x4f, 0x95, 0x93, 0xe4, 0xcc, - 0x2e, 0x1e, 0xa8, 0x86, 0x93, 0x60, 0xc9, 0x0e, 0x2d, 0x43, 0x3f, 0x4b, 0xd2, 0xca, 0x82, 0x92, - 0xe6, 0xc4, 0xc2, 0x4e, 0x3d, 0xf7, 0xe0, 0x7b, 0x13, 0x03, 0x63, 0x5e, 0x9c, 0xae, 0x01, 0xa1, - 0x29, 0xf0, 0xc3, 0xe9, 0xe3, 0xf9, 0x6b, 0x40, 0x28, 0x18, 0xae, 0x56, 0x3b, 0xad, 0x01, 0x45, - 0x84, 0x63, 0xa6, 0x74, 0x67, 0xa6, 0xbb, 0xe9, 0x89, 0x0e, 0xae, 0x7c, 0xb9, 0x7b, 0x29, 0xdb, - 0x99, 0xe9, 0x4e, 0x4a, 0x59, 0xd8, 0xbf, 0x3e, 0x94, 0x96, 0x59, 0x98, 0x86, 0xe9, 0xff, 0xb0, - 0x52, 0xce, 0x07, 0x1f, 0xe8, 0x55, 0xe1, 0x7d, 0x88, 0x17, 0xd7, 0xcf, 0x59, 0x70, 0xa2, 0x95, - 0xf9, 0x21, 0x42, 0x00, 0xe8, 0x4d, 0x6f, 0xce, 0x3f, 0x5d, 0x05, 0xb0, 0xcd, 0xc6, 0xe3, 0x9c, - 0x9a, 0x92, 0xca, 0x81, 0xe2, 0x3b, 0x56, 0x0e, 0xac, 0xc1, 0x50, 0x8d, 0xdf, 0xe4, 0x64, 0xe0, - 0xf5, 0x9e, 0xc2, 0x2f, 0x32, 0x51, 0x42, 0x5c, 0x01, 0x37, 0xb1, 0x62, 0x81, 0x7e, 0xd8, 0x82, - 0xd3, 0xc9, 0xa6, 0x63, 0xc2, 0xd0, 0x22, 0xea, 0x2d, 0x57, 0x6b, 0x2d, 0x8b, 0xef, 0x4f, 0xc9, - 0xff, 0x06, 0xf1, 0xbd, 0x6e, 0x04, 0xb8, 0x73, 0x65, 0x68, 0x31, 0x43, 0xaf, 0x36, 0x60, 0x5a, - 0x14, 0x7b, 0xd0, 0xad, 0xbd, 0x00, 0x23, 0x4d, 0xbf, 0xed, 0x45, 0xc2, 0xf3, 0x4f, 0x78, 0x21, - 0x31, 0xef, 0x9b, 0x35, 0x0d, 0x8e, 0x0d, 0xaa, 0x84, 0x46, 0x6e, 0xe8, 0xbe, 0x35, 0x72, 0x6f, - 0xc1, 0x88, 0xa7, 0xb9, 0xaa, 0x77, 0xba, 0xc1, 0x0a, 0xed, 0xa2, 0x46, 0xcd, 0x5b, 0xa9, 0x43, - 0xb0, 0xc1, 0xad, 0xb3, 0xb6, 0x0c, 0xde, 0x99, 0xb6, 0xec, 0x68, 0xfd, 0x0d, 0x7f, 0xbe, 0x90, - 0x71, 0x63, 0xe0, 0x5a, 0xb9, 0x57, 0x4d, 0xad, 0xdc, 0xb9, 0xa4, 0x56, 0x2e, 0x65, 0xaa, 0x32, - 0x14, 0x72, 0xbd, 0x67, 0x87, 0xeb, 0x39, 0xa4, 0xee, 0xf7, 0x5a, 0xf0, 0x10, 0xb3, 0x7d, 0xd0, - 0x0a, 0xde, 0xb1, 0xbd, 0xe3, 0xe1, 0xbb, 0xfb, 0xe5, 0x87, 0x56, 0xb3, 0xd9, 0xe1, 0xbc, 0x7a, - 0xec, 0x06, 0x9c, 0xed, 0x76, 0xee, 0x32, 0x1f, 0xd7, 0xba, 0x72, 0x8e, 0x88, 0x7d, 0x5c, 0xeb, - 0x2b, 0x8b, 0x98, 0x61, 0x7a, 0x0d, 0x18, 0x67, 0xff, 0x07, 0x0b, 0x8a, 0x15, 
0xbf, 0x7e, 0x04, - 0x37, 0xfa, 0x0f, 0x1b, 0x37, 0xfa, 0x87, 0xb3, 0x4f, 0xfc, 0x7a, 0xae, 0xb1, 0x6f, 0x29, 0x61, - 0xec, 0x3b, 0x9d, 0xc7, 0xa0, 0xb3, 0x69, 0xef, 0xa7, 0x8a, 0x30, 0x5c, 0xf1, 0xeb, 0x6a, 0x9d, - 0xfd, 0x93, 0xfb, 0x79, 0x60, 0x92, 0x9b, 0xef, 0x47, 0xe3, 0xcc, 0x5c, 0x63, 0x65, 0xc8, 0x81, - 0x6f, 0xb3, 0x77, 0x26, 0x37, 0x88, 0xbb, 0xb5, 0x1d, 0x91, 0x7a, 0xf2, 0x73, 0x8e, 0xee, 0x9d, - 0xc9, 0x37, 0x8b, 0x30, 0x9e, 0xa8, 0x1d, 0x35, 0x60, 0xb4, 0xa1, 0x9b, 0x92, 0xc4, 0x3c, 0xbd, - 0x2f, 0x2b, 0x94, 0xf0, 0xd3, 0xd7, 0x40, 0xd8, 0x64, 0x8e, 0x66, 0x01, 0x94, 0x6f, 0x85, 0xd4, - 0xf6, 0xb3, 0x6b, 0x8d, 0x72, 0xbe, 0x08, 0xb1, 0x46, 0x81, 0x5e, 0x84, 0xe1, 0xc8, 0x6f, 0xf9, - 0x0d, 0x7f, 0x6b, 0xef, 0x0a, 0x91, 0xb1, 0x04, 0x95, 0xf7, 0xed, 0x46, 0x8c, 0xc2, 0x3a, 0x1d, - 0xba, 0x0d, 0x93, 0x8a, 0x49, 0xf5, 0x10, 0xcc, 0x6b, 0x4c, 0x6d, 0xb2, 0x9e, 0xe4, 0x88, 0xd3, - 0x95, 0xa0, 0x97, 0x61, 0x8c, 0xb9, 0x01, 0xb3, 0xf2, 0x57, 0xc8, 0x9e, 0x8c, 0x31, 0xcb, 0x24, - 0xec, 0x35, 0x03, 0x83, 0x13, 0x94, 0x68, 0x01, 0x26, 0x9b, 0x6e, 0x98, 0x28, 0x3e, 0xc0, 0x8a, - 0xb3, 0x06, 0xac, 0x25, 0x91, 0x38, 0x4d, 0x6f, 0xff, 0xac, 0x18, 0x63, 0x2f, 0x72, 0xdf, 0x5b, - 0x8e, 0xef, 0xee, 0xe5, 0xf8, 0x0d, 0x0b, 0x26, 0x68, 0xed, 0xcc, 0xb7, 0x51, 0x0a, 0x52, 0x2a, - 0x0b, 0x81, 0xd5, 0x21, 0x0b, 0xc1, 0x39, 0xba, 0x6d, 0xd7, 0xfd, 0x76, 0x24, 0xb4, 0xa3, 0xda, - 0xbe, 0x4c, 0xa1, 0x58, 0x60, 0x05, 0x1d, 0x09, 0x02, 0xf1, 0x1e, 0x5b, 0xa7, 0x23, 0x41, 0x80, - 0x05, 0x56, 0x26, 0x29, 0xe8, 0xcb, 0x4e, 0x52, 0xc0, 0x63, 0x4d, 0x0b, 0x2f, 0x38, 0x21, 0xd2, - 0x6a, 0xb1, 0xa6, 0xa5, 0x7b, 0x5c, 0x4c, 0x63, 0x7f, 0xad, 0x08, 0x23, 0x15, 0xbf, 0x1e, 0x3b, - 0x76, 0xbc, 0x60, 0x38, 0x76, 0x9c, 0x4d, 0x38, 0x76, 0x4c, 0xe8, 0xb4, 0xef, 0xb9, 0x71, 0x7c, - 0xab, 0xdc, 0x38, 0x7e, 0xc3, 0x62, 0xa3, 0xb6, 0xb8, 0x5e, 0xe5, 0xae, 0xb2, 0xe8, 0x22, 0x0c, - 0xb3, 0x1d, 0x8e, 0x05, 0x00, 0x90, 0xde, 0x0e, 0x2c, 0x69, 0xe0, 0x7a, 0x0c, 0xc6, 0x3a, 0x0d, - 0x3a, 0x0f, 0x43, 0x21, 0x71, 0x82, 0xda, 0xb6, 0xda, 0xde, 0x85, 0x6b, 0x02, 0x87, 0x61, 0x85, - 0x45, 0x6f, 0xc4, 0x61, 0x8e, 0x8b, 0xf9, 0x0f, 0x8a, 0xf5, 0xf6, 0xf0, 0x25, 0x92, 0x1f, 0xdb, - 0xd8, 0xbe, 0x01, 0x28, 0x4d, 0xdf, 0x43, 0x20, 0xce, 0xb2, 0x19, 0x88, 0xb3, 0x94, 0x0a, 0xc2, - 0xf9, 0xd7, 0x16, 0x8c, 0x55, 0xfc, 0x3a, 0x5d, 0xba, 0xdf, 0x49, 0xeb, 0x54, 0x8f, 0xf1, 0x3e, - 0xd0, 0x21, 0xc6, 0xfb, 0xa3, 0xd0, 0x5f, 0xf1, 0xeb, 0x5d, 0x82, 0x85, 0xfe, 0x2d, 0x0b, 0x06, - 0x2b, 0x7e, 0xfd, 0x08, 0x0c, 0x2f, 0xaf, 0x9a, 0x86, 0x97, 0x87, 0x72, 0xe6, 0x4d, 0x8e, 0xad, - 0xe5, 0xff, 0xef, 0x83, 0x51, 0xda, 0x4e, 0x7f, 0x4b, 0x0e, 0xa5, 0xd1, 0x6d, 0x56, 0x0f, 0xdd, - 0x46, 0xaf, 0x01, 0x7e, 0xa3, 0xe1, 0xdf, 0x4a, 0x0e, 0xeb, 0x32, 0x83, 0x62, 0x81, 0x45, 0xcf, - 0xc0, 0x50, 0x2b, 0x20, 0xbb, 0xae, 0x2f, 0xe4, 0x6b, 0xcd, 0x8c, 0x55, 0x11, 0x70, 0xac, 0x28, - 0xe8, 0xc5, 0x3b, 0x74, 0x3d, 0x2a, 0x4b, 0xd4, 0x7c, 0xaf, 0xce, 0x6d, 0x13, 0x45, 0x91, 0x88, - 0x48, 0x83, 0x63, 0x83, 0x0a, 0xdd, 0x80, 0x12, 0xfb, 0xcf, 0xb6, 0x9d, 0x83, 0xa7, 0x40, 0x17, - 0xa9, 0x59, 0x05, 0x03, 0x1c, 0xf3, 0x42, 0xcf, 0x01, 0x44, 0x32, 0x99, 0x47, 0x28, 0x82, 0x46, - 0xaa, 0xbb, 0x88, 0x4a, 0xf3, 0x11, 0x62, 0x8d, 0x0a, 0x3d, 0x0d, 0xa5, 0xc8, 0x71, 0x1b, 0xab, - 0xae, 0xc7, 0xec, 0xf7, 0xb4, 0xfd, 0x22, 0x43, 0xaa, 0x00, 0xe2, 0x18, 0x4f, 0x65, 0x41, 0x16, - 0x0e, 0x68, 0x7e, 0x2f, 0x12, 0xc9, 0xc0, 0x8a, 0x5c, 0x16, 0x5c, 0x55, 0x50, 0xac, 0x51, 0xa0, - 0x6d, 0x38, 0xe5, 0x7a, 0x2c, 0x69, 0x0f, 0xa9, 0xee, 0xb8, 0xad, 0x8d, 0xd5, 0xea, 0x75, 0x12, - 0xb8, 
0x9b, 0x7b, 0xf3, 0x4e, 0x6d, 0x87, 0x78, 0x32, 0xb9, 0xf5, 0x63, 0xa2, 0x89, 0xa7, 0x56, - 0x3a, 0xd0, 0xe2, 0x8e, 0x9c, 0xec, 0xe7, 0xd9, 0x7c, 0xbf, 0x5a, 0x45, 0x4f, 0x19, 0x5b, 0xc7, - 0x09, 0x7d, 0xeb, 0xb8, 0xb7, 0x5f, 0x1e, 0xb8, 0x5a, 0xd5, 0x62, 0xd2, 0xbc, 0x04, 0xc7, 0x2b, - 0x7e, 0xbd, 0xe2, 0x07, 0xd1, 0xb2, 0x1f, 0xdc, 0x72, 0x82, 0xba, 0x9c, 0x5e, 0x65, 0x19, 0x95, - 0x87, 0xee, 0x9f, 0xfd, 0x7c, 0x77, 0x31, 0x22, 0xee, 0x3c, 0xcf, 0x24, 0xb6, 0x03, 0x3e, 0xb7, - 0xac, 0x31, 0xd9, 0x41, 0xa5, 0xbd, 0xba, 0xe4, 0x44, 0x04, 0x5d, 0x85, 0xd1, 0x9a, 0x7e, 0x8c, - 0x8a, 0xe2, 0x4f, 0xca, 0x83, 0xcc, 0x38, 0x63, 0x33, 0xcf, 0x5d, 0xb3, 0xbc, 0xfd, 0x59, 0x51, - 0x09, 0x57, 0x44, 0x70, 0x97, 0xd6, 0x5e, 0xf2, 0xbf, 0xcb, 0xbc, 0x38, 0x85, 0xfc, 0x98, 0x87, - 0xdc, 0xae, 0xdc, 0x31, 0x2f, 0x8e, 0xfd, 0xdd, 0x70, 0x22, 0x59, 0x7d, 0xcf, 0x49, 0xe8, 0x17, - 0x60, 0x32, 0xd0, 0x0b, 0x6a, 0x49, 0x06, 0x8f, 0xf3, 0x5c, 0x26, 0x09, 0x24, 0x4e, 0xd3, 0xdb, - 0x2f, 0xc2, 0x24, 0xbd, 0xfc, 0x2a, 0x41, 0x8e, 0xf5, 0x72, 0xf7, 0xf0, 0x44, 0xff, 0xb1, 0x9f, - 0x1d, 0x44, 0x89, 0x8c, 0x53, 0xe8, 0x93, 0x30, 0x16, 0x92, 0x55, 0xd7, 0x6b, 0xdf, 0x96, 0xba, - 0xb5, 0x0e, 0xef, 0x8c, 0xab, 0x4b, 0x3a, 0x25, 0xbf, 0x3f, 0x98, 0x30, 0x9c, 0xe0, 0x86, 0x9a, - 0x30, 0x76, 0xcb, 0xf5, 0xea, 0xfe, 0xad, 0x50, 0xf2, 0x1f, 0xca, 0x57, 0xd4, 0xdf, 0xe0, 0x94, - 0x89, 0x36, 0x1a, 0xd5, 0xdd, 0x30, 0x98, 0xe1, 0x04, 0x73, 0xba, 0xd8, 0x83, 0xb6, 0x37, 0x17, - 0x5e, 0x0b, 0x09, 0x7f, 0x39, 0x2a, 0x16, 0x3b, 0x96, 0x40, 0x1c, 0xe3, 0xe9, 0x62, 0x67, 0x7f, - 0x2e, 0x05, 0x7e, 0x9b, 0xa7, 0x37, 0x12, 0x8b, 0x1d, 0x2b, 0x28, 0xd6, 0x28, 0xe8, 0x66, 0xc8, - 0xfe, 0xad, 0xfb, 0x1e, 0xf6, 0xfd, 0x48, 0x6e, 0x9f, 0x2c, 0x3d, 0x9f, 0x06, 0xc7, 0x06, 0x15, - 0x5a, 0x06, 0x14, 0xb6, 0x5b, 0xad, 0x06, 0x73, 0x5d, 0x74, 0x1a, 0x8c, 0x15, 0x77, 0xbb, 0x2a, - 0x72, 0xef, 0x96, 0x6a, 0x0a, 0x8b, 0x33, 0x4a, 0xd0, 0x73, 0x71, 0x53, 0x34, 0xb5, 0x9f, 0x35, - 0x95, 0x1b, 0xf5, 0xaa, 0xbc, 0x9d, 0x12, 0x87, 0x96, 0x60, 0x30, 0xdc, 0x0b, 0x6b, 0x51, 0x23, - 0xec, 0x94, 0x0c, 0xb1, 0xca, 0x48, 0xb4, 0x5c, 0xbc, 0xbc, 0x08, 0x96, 0x65, 0x51, 0x0d, 0xa6, - 0x04, 0xc7, 0x85, 0x6d, 0xc7, 0x53, 0x29, 0xda, 0xb8, 0xf7, 0xde, 0xc5, 0xbb, 0xfb, 0xe5, 0x29, - 0x51, 0xb3, 0x8e, 0xbe, 0xb7, 0x5f, 0xa6, 0x8b, 0x23, 0x03, 0x83, 0xb3, 0xb8, 0xf1, 0xc9, 0x57, - 0xab, 0xf9, 0xcd, 0x56, 0x25, 0xf0, 0x37, 0xdd, 0x06, 0xe9, 0x64, 0x18, 0xad, 0x1a, 0x94, 0x62, - 0xf2, 0x19, 0x30, 0x9c, 0xe0, 0x66, 0x7f, 0x96, 0xc9, 0x8e, 0x55, 0x77, 0xcb, 0x73, 0xa2, 0x76, - 0x40, 0x50, 0x13, 0x46, 0x5b, 0x6c, 0x77, 0x11, 0x49, 0x87, 0xc4, 0x5c, 0x7f, 0xa1, 0x47, 0xfd, - 0xd7, 0x2d, 0x96, 0x36, 0xd1, 0xf0, 0x83, 0xac, 0xe8, 0xec, 0xb0, 0xc9, 0xdd, 0xfe, 0x17, 0x27, - 0x99, 0xf4, 0x51, 0xe5, 0x4a, 0xad, 0x41, 0xf1, 0x6c, 0x4c, 0x5c, 0x63, 0x67, 0xf2, 0xd5, 0xc7, - 0xf1, 0xb0, 0x88, 0xa7, 0x67, 0x58, 0x96, 0x45, 0x9f, 0x80, 0x31, 0x7a, 0x2b, 0x54, 0x12, 0x40, - 0x38, 0x7d, 0x2c, 0x3f, 0xbc, 0x8f, 0xa2, 0xd2, 0x13, 0x92, 0xe9, 0x85, 0x71, 0x82, 0x19, 0x7a, - 0x83, 0xb9, 0x06, 0x4a, 0xd6, 0x85, 0x5e, 0x58, 0xeb, 0x5e, 0x80, 0x92, 0xad, 0xc6, 0x04, 0xb5, - 0x61, 0x2a, 0x9d, 0x76, 0x35, 0x9c, 0xb6, 0xf3, 0xc5, 0xeb, 0x74, 0xe6, 0xd4, 0x38, 0x73, 0x54, - 0x1a, 0x17, 0xe2, 0x2c, 0xfe, 0x68, 0x35, 0x99, 0x14, 0xb3, 0x68, 0x28, 0x9e, 0x53, 0x89, 0x31, - 0x47, 0x3b, 0xe6, 0xc3, 0xdc, 0x82, 0xd3, 0x5a, 0x5e, 0xc1, 0x4b, 0x81, 0xc3, 0x5c, 0x53, 0x5c, - 0xb6, 0x9d, 0x6a, 0x72, 0xd1, 0x23, 0x77, 0xf7, 0xcb, 0xa7, 0x37, 0x3a, 0x11, 0xe2, 0xce, 0x7c, - 0xd0, 0x55, 0x38, 0xce, 0x83, 
0x53, 0x2c, 0x12, 0xa7, 0xde, 0x70, 0x3d, 0x25, 0x78, 0xf1, 0x25, - 0x7f, 0xf2, 0xee, 0x7e, 0xf9, 0xf8, 0x5c, 0x16, 0x01, 0xce, 0x2e, 0x87, 0x5e, 0x85, 0x52, 0xdd, - 0x0b, 0x45, 0x1f, 0x0c, 0x18, 0xa9, 0x1b, 0x4b, 0x8b, 0xeb, 0x55, 0xf5, 0xfd, 0xf1, 0x1f, 0x1c, - 0x17, 0x40, 0x5b, 0xdc, 0xf2, 0xa1, 0xd4, 0x55, 0x83, 0xa9, 0x98, 0x85, 0x49, 0x8d, 0xae, 0xf1, - 0x3c, 0x9d, 0x9b, 0xfc, 0xd4, 0xab, 0x2d, 0xe3, 0xe5, 0xba, 0xc1, 0x18, 0xbd, 0x0e, 0x48, 0xa4, - 0x08, 0x99, 0xab, 0xb1, 0x8c, 0x56, 0x9a, 0x3b, 0xa2, 0xba, 0x85, 0x56, 0x53, 0x14, 0x38, 0xa3, - 0x14, 0xba, 0x4c, 0x77, 0x15, 0x1d, 0x2a, 0x76, 0x2d, 0x95, 0x20, 0x78, 0x91, 0xb4, 0x02, 0xc2, - 0x3c, 0xe8, 0x4c, 0x8e, 0x38, 0x51, 0x0e, 0xd5, 0xe1, 0x94, 0xd3, 0x8e, 0x7c, 0x66, 0x54, 0x32, - 0x49, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0x7b, 0xee, 0x10, 0x8b, 0x85, 0x78, 0x6a, 0xae, 0x03, 0x1d, - 0xee, 0xc8, 0x85, 0x4a, 0xe4, 0x32, 0xe7, 0xbf, 0xb0, 0xf7, 0x18, 0x2f, 0x6d, 0xb9, 0x11, 0x54, - 0x52, 0xa0, 0x17, 0x61, 0x78, 0xdb, 0x0f, 0xa3, 0x75, 0x12, 0xdd, 0xf2, 0x83, 0x1d, 0x11, 0x93, - 0x3c, 0xce, 0x03, 0x11, 0xa3, 0xb0, 0x4e, 0x47, 0xaf, 0xdc, 0xcc, 0xdb, 0x68, 0x65, 0x91, 0x39, - 0x7a, 0x0c, 0xc5, 0x7b, 0xcc, 0x65, 0x0e, 0xc6, 0x12, 0x2f, 0x49, 0x57, 0x2a, 0x0b, 0xcc, 0x69, - 0x23, 0x41, 0xba, 0x52, 0x59, 0xc0, 0x12, 0x4f, 0xa7, 0x6b, 0xb8, 0xed, 0x04, 0xa4, 0x12, 0xf8, - 0x35, 0x12, 0x6a, 0xd9, 0x47, 0x1e, 0xe6, 0x11, 0xd7, 0xe9, 0x74, 0xad, 0x66, 0x11, 0xe0, 0xec, - 0x72, 0x88, 0xa4, 0x73, 0x6a, 0x8e, 0xe5, 0x5b, 0xdb, 0xd2, 0xf2, 0x4c, 0x8f, 0x69, 0x35, 0x3d, - 0x98, 0x50, 0xd9, 0x3c, 0x79, 0x8c, 0xf5, 0x70, 0x7a, 0x9c, 0xcd, 0xed, 0xde, 0x03, 0xb4, 0x2b, - 0xfb, 0xe5, 0x4a, 0x82, 0x13, 0x4e, 0xf1, 0x36, 0x82, 0x6d, 0x4e, 0x74, 0x0d, 0xb6, 0x79, 0x01, - 0x4a, 0x61, 0xfb, 0x66, 0xdd, 0x6f, 0x3a, 0xae, 0xc7, 0x9c, 0x36, 0xb4, 0xbb, 0x5f, 0x55, 0x22, - 0x70, 0x4c, 0x83, 0x96, 0x61, 0xc8, 0x91, 0xc6, 0x49, 0x94, 0x1f, 0x47, 0x4c, 0x99, 0x24, 0x79, - 0x68, 0x1d, 0x69, 0x8e, 0x54, 0x65, 0xd1, 0x2b, 0x30, 0x2a, 0x82, 0x2b, 0x88, 0x04, 0xd8, 0x53, - 0xe6, 0x0b, 0xd8, 0xaa, 0x8e, 0xc4, 0x26, 0x2d, 0xba, 0x06, 0xc3, 0x91, 0xdf, 0x60, 0xcf, 0x38, - 0xa9, 0x98, 0x77, 0x22, 0x3f, 0xdc, 0xe7, 0x86, 0x22, 0xd3, 0xd5, 0xe6, 0xaa, 0x28, 0xd6, 0xf9, - 0xa0, 0x0d, 0x3e, 0xdf, 0x59, 0xae, 0x11, 0x12, 0x8a, 0x0c, 0xca, 0xa7, 0xf3, 0x3c, 0xee, 0x18, - 0x99, 0xb9, 0x1c, 0x44, 0x49, 0xac, 0xb3, 0x41, 0x97, 0x60, 0xb2, 0x15, 0xb8, 0x3e, 0x9b, 0x13, - 0xca, 0xd8, 0x3a, 0x6d, 0x66, 0x16, 0xac, 0x24, 0x09, 0x70, 0xba, 0x0c, 0x8b, 0x8d, 0x21, 0x80, - 0xd3, 0x27, 0x79, 0x76, 0x24, 0x7e, 0x95, 0xe6, 0x30, 0xac, 0xb0, 0x68, 0x8d, 0xed, 0xc4, 0x5c, - 0x0b, 0x34, 0x3d, 0x93, 0x1f, 0xba, 0x4c, 0xd7, 0x16, 0x71, 0xe1, 0x55, 0xfd, 0xc5, 0x31, 0x07, - 0x54, 0xd7, 0x92, 0x12, 0xd3, 0x2b, 0x40, 0x38, 0x7d, 0xaa, 0x83, 0xcb, 0x67, 0xe2, 0x56, 0x16, - 0x0b, 0x04, 0x06, 0x38, 0xc4, 0x09, 0x9e, 0xe8, 0x23, 0x30, 0x21, 0xe2, 0xd0, 0xc6, 0xdd, 0x74, - 0x3a, 0x7e, 0x16, 0x83, 0x13, 0x38, 0x9c, 0xa2, 0xe6, 0xd9, 0x89, 0x9c, 0x9b, 0x0d, 0x22, 0xb6, - 0xbe, 0x55, 0xd7, 0xdb, 0x09, 0xa7, 0xcf, 0xb0, 0xfd, 0x41, 0x64, 0x27, 0x4a, 0x62, 0x71, 0x46, - 0x09, 0xb4, 0x01, 0x13, 0xad, 0x80, 0x90, 0x26, 0x13, 0xf4, 0xc5, 0x79, 0x56, 0xe6, 0xa1, 0x61, - 0x68, 0x4b, 0x2a, 0x09, 0xdc, 0xbd, 0x0c, 0x18, 0x4e, 0x71, 0x40, 0xb7, 0x60, 0xc8, 0xdf, 0x25, - 0xc1, 0x36, 0x71, 0xea, 0xd3, 0x67, 0x3b, 0x3c, 0xd6, 0x12, 0x87, 0xdb, 0x55, 0x41, 0x9b, 0xf0, - 0x65, 0x91, 0xe0, 0xee, 0xbe, 0x2c, 0xb2, 0x32, 0xf4, 0x7f, 0x5a, 0x70, 0x52, 0x5a, 0x87, 0xaa, - 0x2d, 0xda, 0xeb, 0x0b, 0xbe, 0x17, 0x46, 0x01, 0x0f, 
0x66, 0xf2, 0x48, 0x7e, 0x80, 0x8f, 0x8d, - 0x9c, 0x42, 0x4a, 0x11, 0x7d, 0x32, 0x8f, 0x22, 0xc4, 0xf9, 0x35, 0xd2, 0xab, 0x69, 0x48, 0x22, - 0xb9, 0x19, 0xcd, 0x85, 0xcb, 0x6f, 0x2c, 0xae, 0x4f, 0x3f, 0xca, 0x23, 0xb1, 0xd0, 0xc5, 0x50, - 0x4d, 0x22, 0x71, 0x9a, 0x1e, 0x5d, 0x84, 0x82, 0x1f, 0x4e, 0x3f, 0xd6, 0x21, 0x8f, 0xb5, 0x5f, - 0xbf, 0x5a, 0xe5, 0x3e, 0x8d, 0x57, 0xab, 0xb8, 0xe0, 0x87, 0x32, 0x43, 0x10, 0xbd, 0x8f, 0x85, - 0xd3, 0x8f, 0x73, 0xb5, 0xa5, 0xcc, 0x10, 0xc4, 0x80, 0x38, 0xc6, 0xa3, 0x6d, 0x18, 0x0f, 0x8d, - 0x7b, 0x6f, 0x38, 0x7d, 0x8e, 0xf5, 0xd4, 0xe3, 0x79, 0x83, 0x66, 0x50, 0x6b, 0xa9, 0x3b, 0x4c, - 0x2e, 0x38, 0xc9, 0x96, 0xaf, 0x2e, 0xed, 0xe6, 0x1d, 0x4e, 0x3f, 0xd1, 0x65, 0x75, 0x69, 0xc4, - 0xfa, 0xea, 0xd2, 0x79, 0xe0, 0x04, 0xcf, 0x99, 0xef, 0x82, 0xc9, 0x94, 0xb8, 0x74, 0x10, 0xff, - 0xfd, 0x99, 0x1d, 0x18, 0x35, 0xa6, 0xe4, 0x03, 0x75, 0xef, 0xf8, 0x9d, 0x12, 0x94, 0x94, 0xd9, - 0x1d, 0x5d, 0x30, 0x3d, 0x3a, 0x4e, 0x26, 0x3d, 0x3a, 0x86, 0x2a, 0x7e, 0xdd, 0x70, 0xe2, 0xd8, - 0xc8, 0x88, 0xd7, 0x99, 0xb7, 0x01, 0xf6, 0xfe, 0xc8, 0x48, 0x33, 0x25, 0x14, 0x7b, 0x76, 0x0d, - 0xe9, 0xeb, 0x68, 0x9d, 0xb8, 0x04, 0x93, 0x9e, 0xcf, 0x64, 0x74, 0x52, 0x97, 0x02, 0x18, 0x93, - 0xb3, 0x4a, 0x7a, 0x00, 0xac, 0x04, 0x01, 0x4e, 0x97, 0xa1, 0x15, 0x72, 0x41, 0x29, 0x69, 0x0e, - 0xe1, 0x72, 0x14, 0x16, 0x58, 0x7a, 0x37, 0xe4, 0xbf, 0xc2, 0xe9, 0x89, 0xfc, 0xbb, 0x21, 0x2f, - 0x94, 0x14, 0xc6, 0x42, 0x29, 0x8c, 0x31, 0xed, 0x7f, 0xcb, 0xaf, 0xaf, 0x54, 0x84, 0x98, 0xaf, - 0x45, 0xd2, 0xae, 0xaf, 0x54, 0x30, 0xc7, 0xa1, 0x39, 0x18, 0x60, 0x3f, 0xc2, 0xe9, 0x91, 0xfc, - 0x80, 0x49, 0xac, 0x84, 0x96, 0xa1, 0x90, 0x15, 0xc0, 0xa2, 0x20, 0xd3, 0xee, 0xd2, 0xbb, 0x11, - 0xd3, 0xee, 0x0e, 0xde, 0xa7, 0x76, 0x57, 0x32, 0xc0, 0x31, 0x2f, 0x74, 0x1b, 0x8e, 0x1b, 0xf7, - 0x51, 0xf5, 0xea, 0x0a, 0xf2, 0x0d, 0xbf, 0x09, 0xe2, 0xf9, 0xd3, 0xa2, 0xd1, 0xc7, 0x57, 0xb2, - 0x38, 0xe1, 0xec, 0x0a, 0x50, 0x03, 0x26, 0x6b, 0xa9, 0x5a, 0x87, 0x7a, 0xaf, 0x55, 0xcd, 0x8b, - 0x74, 0x8d, 0x69, 0xc6, 0xe8, 0x15, 0x18, 0x7a, 0xdb, 0xe7, 0x4e, 0x5a, 0xe2, 0x6a, 0x22, 0x23, - 0x7e, 0x0c, 0xbd, 0x71, 0xb5, 0xca, 0xe0, 0xf7, 0xf6, 0xcb, 0xc3, 0x15, 0xbf, 0x2e, 0xff, 0x62, - 0x55, 0x00, 0xfd, 0x80, 0x05, 0x33, 0xe9, 0x0b, 0xaf, 0x6a, 0xf4, 0x68, 0xef, 0x8d, 0xb6, 0x45, - 0xa5, 0x33, 0x4b, 0xb9, 0xec, 0x70, 0x87, 0xaa, 0xd0, 0x87, 0xe8, 0x7a, 0x0a, 0xdd, 0x3b, 0x44, - 0xa4, 0x77, 0x7e, 0x24, 0x5e, 0x4f, 0x14, 0x7a, 0x6f, 0xbf, 0x3c, 0xce, 0x77, 0x46, 0xf7, 0x8e, - 0x8a, 0xf9, 0xcd, 0x0b, 0xa0, 0xef, 0x86, 0xe3, 0x41, 0x5a, 0x83, 0x4a, 0xa4, 0x10, 0xfe, 0x54, - 0x2f, 0xbb, 0x6c, 0x72, 0xc0, 0x71, 0x16, 0x43, 0x9c, 0x5d, 0x8f, 0xfd, 0xab, 0x16, 0xd3, 0x6f, - 0x8b, 0x66, 0x91, 0xb0, 0xdd, 0x38, 0x8a, 0xa4, 0xf2, 0x4b, 0x86, 0xed, 0xf8, 0xbe, 0x3d, 0x9b, - 0xfe, 0xb1, 0xc5, 0x3c, 0x9b, 0x8e, 0xf0, 0x8d, 0xd6, 0x1b, 0x30, 0x14, 0xc9, 0x64, 0xff, 0x1d, - 0xf2, 0xe0, 0x6b, 0x8d, 0x62, 0xde, 0x5d, 0xea, 0x92, 0xa3, 0xf2, 0xfa, 0x2b, 0x36, 0xf6, 0x3f, - 0xe0, 0x23, 0x20, 0x31, 0x47, 0x60, 0xa2, 0x5b, 0x34, 0x4d, 0x74, 0xe5, 0x2e, 0x5f, 0x90, 0x63, - 0xaa, 0xfb, 0xfb, 0x66, 0xbb, 0x99, 0x72, 0xef, 0xdd, 0xee, 0x52, 0x67, 0x7f, 0xc1, 0x02, 0x88, - 0x93, 0x2c, 0xf4, 0x90, 0xce, 0xf5, 0x25, 0x7a, 0xad, 0xf1, 0x23, 0xbf, 0xe6, 0x37, 0x84, 0x81, - 0xe2, 0x54, 0x6c, 0x25, 0xe4, 0xf0, 0x7b, 0xda, 0x6f, 0xac, 0xa8, 0x51, 0x59, 0x46, 0x3d, 0x2d, - 0xc6, 0x76, 0x6b, 0x23, 0xe2, 0xe9, 0x97, 0x2d, 0x38, 0x96, 0xe5, 0xf0, 0x4f, 0x2f, 0xc9, 0x5c, - 0xcd, 0xa9, 0xdc, 0x1d, 0xd5, 0x68, 0x5e, 0x17, 0x70, 0xac, 0x28, 0x7a, 0xce, 
0x93, 0x7b, 0xb0, - 0x04, 0x00, 0x57, 0x61, 0xb4, 0x12, 0x10, 0x4d, 0xbe, 0x78, 0x8d, 0x47, 0xd2, 0xe1, 0xed, 0x79, - 0xe6, 0xc0, 0x51, 0x74, 0xec, 0xaf, 0x14, 0xe0, 0x18, 0x77, 0xda, 0x99, 0xdb, 0xf5, 0xdd, 0x7a, - 0xc5, 0xaf, 0x8b, 0x67, 0x9a, 0x6f, 0xc2, 0x48, 0x4b, 0xd3, 0x4d, 0x77, 0x0a, 0x66, 0xad, 0xeb, - 0xb0, 0x63, 0x6d, 0x9a, 0x0e, 0xc5, 0x06, 0x2f, 0x54, 0x87, 0x11, 0xb2, 0xeb, 0xd6, 0x94, 0xe7, - 0x47, 0xe1, 0xc0, 0x87, 0xb4, 0xaa, 0x65, 0x49, 0xe3, 0x83, 0x0d, 0xae, 0x3d, 0xbb, 0xda, 0x6a, - 0x22, 0x5a, 0x5f, 0x17, 0x6f, 0x8f, 0x1f, 0xb5, 0xe0, 0xa1, 0x9c, 0xd0, 0xd7, 0xb4, 0xba, 0x5b, - 0xcc, 0x3d, 0x4a, 0x4c, 0x5b, 0x55, 0x1d, 0x77, 0x9a, 0xc2, 0x02, 0x8b, 0x3e, 0x0a, 0xc0, 0x9d, - 0x9e, 0x88, 0x57, 0xeb, 0x1a, 0x23, 0xd8, 0x08, 0x6f, 0xaa, 0x45, 0xaa, 0x94, 0xe5, 0xb1, 0xc6, - 0xcb, 0xfe, 0x72, 0x1f, 0xf4, 0x33, 0x27, 0x1b, 0x54, 0x81, 0xc1, 0x6d, 0x9e, 0x25, 0xae, 0xe3, - 0xb8, 0x51, 0x5a, 0x99, 0x78, 0x2e, 0x1e, 0x37, 0x0d, 0x8a, 0x25, 0x1b, 0xb4, 0x06, 0x53, 0x3c, - 0x59, 0x5f, 0x63, 0x91, 0x34, 0x9c, 0x3d, 0xa9, 0xf6, 0xe5, 0xf9, 0xe7, 0x95, 0xfa, 0x7b, 0x25, - 0x4d, 0x82, 0xb3, 0xca, 0xa1, 0xd7, 0x60, 0x8c, 0x5e, 0xc3, 0xfd, 0x76, 0x24, 0x39, 0xf1, 0x34, - 0x7d, 0xea, 0x66, 0xb2, 0x61, 0x60, 0x71, 0x82, 0x1a, 0xbd, 0x02, 0xa3, 0xad, 0x94, 0x82, 0xbb, - 0x3f, 0xd6, 0x04, 0x99, 0x4a, 0x6d, 0x93, 0x96, 0xf9, 0xfc, 0xb7, 0xd9, 0x0b, 0x87, 0x8d, 0xed, - 0x80, 0x84, 0xdb, 0x7e, 0xa3, 0xce, 0x24, 0xe0, 0x7e, 0xcd, 0xe7, 0x3f, 0x81, 0xc7, 0xa9, 0x12, - 0x94, 0xcb, 0xa6, 0xe3, 0x36, 0xda, 0x01, 0x89, 0xb9, 0x0c, 0x98, 0x5c, 0x96, 0x13, 0x78, 0x9c, - 0x2a, 0xd1, 0x5d, 0x73, 0x3f, 0x78, 0x38, 0x9a, 0x7b, 0xfb, 0xa7, 0x0b, 0x60, 0x0c, 0xed, 0x77, - 0x70, 0xfa, 0xc0, 0x57, 0xa1, 0x6f, 0x2b, 0x68, 0xd5, 0x84, 0x43, 0x59, 0xe6, 0x97, 0xc5, 0xb9, - 0xc3, 0xf9, 0x97, 0xd1, 0xff, 0x98, 0x95, 0xa2, 0x6b, 0xfc, 0x78, 0x25, 0xf0, 0xe9, 0x21, 0x27, - 0x63, 0x2d, 0xaa, 0xa7, 0x35, 0x83, 0x32, 0x48, 0x44, 0x87, 0xa8, 0xc4, 0xe2, 0x7d, 0x00, 0xe7, - 0x60, 0xf8, 0x5e, 0x55, 0x45, 0x28, 0x18, 0xc9, 0x05, 0x5d, 0x84, 0x61, 0x91, 0xd1, 0x8d, 0xbd, - 0x00, 0xe1, 0x8b, 0x89, 0xf9, 0x8a, 0x2d, 0xc6, 0x60, 0xac, 0xd3, 0xd8, 0x3f, 0x58, 0x80, 0xa9, - 0x8c, 0x27, 0x7c, 0xfc, 0x18, 0xd9, 0x72, 0xc3, 0x48, 0xa5, 0x27, 0xd7, 0x8e, 0x11, 0x0e, 0xc7, - 0x8a, 0x82, 0xee, 0x55, 0xfc, 0xa0, 0x4a, 0x1e, 0x4e, 0xe2, 0x89, 0x8c, 0xc0, 0x1e, 0x30, 0xd1, - 0xf7, 0x59, 0xe8, 0x6b, 0x87, 0x44, 0xc6, 0x13, 0x57, 0xc7, 0x36, 0x33, 0x6b, 0x33, 0x0c, 0xbd, - 0x02, 0x6e, 0x29, 0x0b, 0xb1, 0x76, 0x05, 0xe4, 0x36, 0x62, 0x8e, 0xa3, 0x8d, 0x8b, 0x88, 0xe7, - 0x78, 0x91, 0xb8, 0x28, 0xc6, 0x81, 0x71, 0x19, 0x14, 0x0b, 0xac, 0xfd, 0xa5, 0x22, 0x9c, 0xcc, - 0x7d, 0xd4, 0x4b, 0x9b, 0xde, 0xf4, 0x3d, 0x37, 0xf2, 0x95, 0x13, 0x1e, 0x0f, 0x86, 0x4b, 0x5a, - 0xdb, 0x6b, 0x02, 0x8e, 0x15, 0x05, 0x3a, 0x07, 0xfd, 0x4c, 0x29, 0x9e, 0x4a, 0xd4, 0x3e, 0xbf, - 0xc8, 0xa3, 0x23, 0x72, 0xb4, 0x76, 0xaa, 0x17, 0x3b, 0x9e, 0xea, 0x8f, 0x52, 0x09, 0xc6, 0x6f, - 0x24, 0x0f, 0x14, 0xda, 0x5c, 0xdf, 0x6f, 0x60, 0x86, 0x44, 0x8f, 0x8b, 0xfe, 0x4a, 0x78, 0x9d, - 0x61, 0xa7, 0xee, 0x87, 0x5a, 0xa7, 0x3d, 0x09, 0x83, 0x3b, 0x64, 0x2f, 0x70, 0xbd, 0xad, 0xa4, - 0x37, 0xe2, 0x15, 0x0e, 0xc6, 0x12, 0x6f, 0xe6, 0x0c, 0x1e, 0x3c, 0x8c, 0x9c, 0xc1, 0xfa, 0x0c, - 0x18, 0xea, 0x2a, 0x9e, 0xfc, 0x50, 0x11, 0xc6, 0xf1, 0xfc, 0xe2, 0x7b, 0x03, 0x71, 0x2d, 0x3d, - 0x10, 0x87, 0x91, 0x5a, 0xf7, 0x60, 0xa3, 0xf1, 0x4b, 0x16, 0x8c, 0xb3, 0xbc, 0x72, 0x22, 0x22, - 0x87, 0xeb, 0x7b, 0x47, 0x70, 0x15, 0x78, 0x14, 0xfa, 0x03, 0x5a, 0x69, 0x32, 0x43, 0x3b, 0x6b, - 0x09, 
0xe6, 0x38, 0x74, 0x0a, 0xfa, 0x58, 0x13, 0xe8, 0xe0, 0x8d, 0xf0, 0x2d, 0x78, 0xd1, 0x89, - 0x1c, 0xcc, 0xa0, 0x2c, 0x36, 0x20, 0x26, 0xad, 0x86, 0xcb, 0x1b, 0x1d, 0xbb, 0x2c, 0xbc, 0x3b, - 0xc2, 0x7d, 0x64, 0x36, 0xed, 0x9d, 0xc5, 0x06, 0xcc, 0x66, 0xd9, 0xf9, 0x9a, 0xfd, 0x17, 0x05, - 0x38, 0x93, 0x59, 0xae, 0xe7, 0xd8, 0x80, 0x9d, 0x4b, 0x3f, 0xc8, 0x14, 0x59, 0xc5, 0x23, 0xf4, - 0xf5, 0xee, 0xeb, 0x55, 0xfa, 0xef, 0xef, 0x21, 0x64, 0x5f, 0x66, 0x97, 0xbd, 0x4b, 0x42, 0xf6, - 0x65, 0xb6, 0x2d, 0x47, 0x4d, 0xf0, 0x37, 0x85, 0x9c, 0x6f, 0x61, 0x0a, 0x83, 0xf3, 0x74, 0x9f, - 0x61, 0xc8, 0x50, 0x5e, 0xc2, 0xf9, 0x1e, 0xc3, 0x61, 0x58, 0x61, 0xd1, 0x1c, 0x8c, 0x37, 0x5d, - 0x8f, 0x6e, 0x3e, 0x7b, 0xa6, 0x28, 0xae, 0x6c, 0x19, 0x6b, 0x26, 0x1a, 0x27, 0xe9, 0x91, 0xab, - 0x85, 0xf3, 0xe3, 0x5f, 0xf7, 0xca, 0x81, 0x56, 0xdd, 0xac, 0xe9, 0xce, 0xa1, 0x7a, 0x31, 0x23, - 0xb4, 0xdf, 0x9a, 0xa6, 0x27, 0x2a, 0xf6, 0xae, 0x27, 0x1a, 0xc9, 0xd6, 0x11, 0xcd, 0xbc, 0x02, - 0xa3, 0xf7, 0x6d, 0x1b, 0xb1, 0xbf, 0x51, 0x84, 0x87, 0x3b, 0x2c, 0x7b, 0xbe, 0xd7, 0x1b, 0x63, - 0xa0, 0xed, 0xf5, 0xa9, 0x71, 0xa8, 0xc0, 0xb1, 0xcd, 0x76, 0xa3, 0xb1, 0xc7, 0x1e, 0x35, 0x91, - 0xba, 0xa4, 0x10, 0x32, 0xa5, 0x54, 0x8e, 0x1c, 0x5b, 0xce, 0xa0, 0xc1, 0x99, 0x25, 0xe9, 0x15, - 0x8b, 0x9e, 0x24, 0x7b, 0x8a, 0x55, 0xe2, 0x8a, 0x85, 0x75, 0x24, 0x36, 0x69, 0xd1, 0x25, 0x98, - 0x74, 0x76, 0x1d, 0x97, 0xe7, 0x44, 0x90, 0x0c, 0xf8, 0x1d, 0x4b, 0xe9, 0xa2, 0xe7, 0x92, 0x04, - 0x38, 0x5d, 0x06, 0xbd, 0x0e, 0xc8, 0xbf, 0xc9, 0x1e, 0x4a, 0xd4, 0x2f, 0x11, 0x4f, 0x58, 0xdd, - 0xd9, 0xd8, 0x15, 0xe3, 0x2d, 0xe1, 0x6a, 0x8a, 0x02, 0x67, 0x94, 0x4a, 0x04, 0x96, 0x1b, 0xc8, - 0x0f, 0x2c, 0xd7, 0x79, 0x5f, 0xec, 0x9a, 0x9d, 0xed, 0x22, 0x8c, 0x1e, 0xd0, 0xfd, 0xd7, 0xfe, - 0xb7, 0x16, 0x28, 0x05, 0xb1, 0x19, 0x18, 0xfa, 0x15, 0xe6, 0x9f, 0xcc, 0x55, 0xdb, 0x5a, 0x2c, - 0xa8, 0xe3, 0x9a, 0x7f, 0x72, 0x8c, 0xc4, 0x26, 0x2d, 0x9f, 0x43, 0x9a, 0x5f, 0xb1, 0x71, 0x2b, - 0x10, 0x71, 0x2b, 0x15, 0x05, 0xfa, 0x18, 0x0c, 0xd6, 0xdd, 0x5d, 0x37, 0x14, 0xca, 0xb1, 0x03, - 0x1b, 0xe3, 0xe2, 0xad, 0x73, 0x91, 0xb3, 0xc1, 0x92, 0x9f, 0xfd, 0x43, 0x85, 0xb8, 0x4f, 0xde, - 0x68, 0xfb, 0x91, 0x73, 0x04, 0x27, 0xf9, 0x25, 0xe3, 0x24, 0x7f, 0x3c, 0x7b, 0xa0, 0xb5, 0x26, - 0xe5, 0x9e, 0xe0, 0x57, 0x13, 0x27, 0xf8, 0x13, 0xdd, 0x59, 0x75, 0x3e, 0xb9, 0xff, 0xa1, 0x05, - 0x93, 0x06, 0xfd, 0x11, 0x1c, 0x20, 0xcb, 0xe6, 0x01, 0xf2, 0x48, 0xd7, 0x6f, 0xc8, 0x39, 0x38, - 0xbe, 0xbf, 0x98, 0x68, 0x3b, 0x3b, 0x30, 0xde, 0x86, 0xbe, 0x6d, 0x27, 0xa8, 0x77, 0x4a, 0x59, - 0x94, 0x2a, 0x34, 0x7b, 0xd9, 0x09, 0x84, 0xa7, 0xc2, 0x33, 0xb2, 0xd7, 0x29, 0xa8, 0xab, 0x97, - 0x02, 0xab, 0x0a, 0xbd, 0x04, 0x03, 0x61, 0xcd, 0x6f, 0xa9, 0x37, 0x53, 0x2c, 0xe5, 0x6f, 0x95, - 0x41, 0xee, 0xed, 0x97, 0x91, 0x59, 0x1d, 0x05, 0x63, 0x41, 0x8f, 0xde, 0x84, 0x51, 0xf6, 0x4b, - 0xb9, 0x0d, 0x16, 0xf3, 0x35, 0x18, 0x55, 0x9d, 0x90, 0xfb, 0xd4, 0x1a, 0x20, 0x6c, 0xb2, 0x9a, - 0xd9, 0x82, 0x92, 0xfa, 0xac, 0x07, 0x6a, 0xed, 0xfe, 0x57, 0x45, 0x98, 0xca, 0x98, 0x73, 0x28, - 0x34, 0x46, 0xe2, 0x62, 0x8f, 0x53, 0xf5, 0x1d, 0x8e, 0x45, 0xc8, 0x2e, 0x50, 0x75, 0x31, 0xb7, - 0x7a, 0xae, 0xf4, 0x5a, 0x48, 0x92, 0x95, 0x52, 0x50, 0xf7, 0x4a, 0x69, 0x65, 0x47, 0xd6, 0xd5, - 0xb4, 0x22, 0xd5, 0xd2, 0x07, 0x3a, 0xa6, 0xbf, 0xd1, 0x07, 0xc7, 0xb2, 0xe2, 0x09, 0xa3, 0xcf, - 0x24, 0xf2, 0x88, 0xbf, 0xd0, 0xa9, 0x87, 0xf5, 0x92, 0x3c, 0xb9, 0xb8, 0x08, 0xe3, 0x39, 0x6b, - 0x66, 0x16, 0xef, 0xda, 0xcd, 0xa2, 0x4e, 0x16, 0x5e, 0x27, 0xe0, 0xf9, 0xdf, 0xe5, 0xf6, 0xf1, - 0x81, 0x9e, 0x1b, 0x20, 0x12, 
0xc7, 0x87, 0x09, 0x97, 0x24, 0x09, 0xee, 0xee, 0x92, 0x24, 0x6b, - 0x46, 0x2b, 0x30, 0x50, 0xe3, 0xbe, 0x2e, 0xc5, 0xee, 0x5b, 0x18, 0x77, 0x74, 0x51, 0x1b, 0xb0, - 0x70, 0x70, 0x11, 0x0c, 0x66, 0x5c, 0x18, 0xd6, 0x3a, 0xe6, 0x81, 0x4e, 0x9e, 0x1d, 0x7a, 0xf0, - 0x69, 0x5d, 0xf0, 0x40, 0x27, 0xd0, 0x8f, 0x5a, 0x90, 0x78, 0xf0, 0xa2, 0x94, 0x72, 0x56, 0xae, - 0x52, 0xee, 0x2c, 0xf4, 0x05, 0x7e, 0x83, 0x24, 0x93, 0x54, 0x63, 0xbf, 0x41, 0x30, 0xc3, 0x50, - 0x8a, 0x28, 0x56, 0xb5, 0x8c, 0xe8, 0xd7, 0x48, 0x71, 0x41, 0x7c, 0x14, 0xfa, 0x1b, 0x64, 0x97, - 0x34, 0x92, 0xb9, 0x04, 0x57, 0x29, 0x10, 0x73, 0x9c, 0xfd, 0x4b, 0x7d, 0x70, 0xba, 0x63, 0xac, - 0x2b, 0x7a, 0x19, 0xdb, 0x72, 0x22, 0x72, 0xcb, 0xd9, 0x4b, 0x26, 0xfd, 0xba, 0xc4, 0xc1, 0x58, - 0xe2, 0xd9, 0xf3, 0x4f, 0x9e, 0xbb, 0x23, 0xa1, 0xc2, 0x14, 0x29, 0x3b, 0x04, 0xd6, 0x54, 0x89, - 0x15, 0x0f, 0x43, 0x25, 0xf6, 0x1c, 0x40, 0x18, 0x36, 0xb8, 0x5b, 0x60, 0x5d, 0xbc, 0x2b, 0x8d, - 0x73, 0xbc, 0x54, 0x57, 0x05, 0x06, 0x6b, 0x54, 0x68, 0x11, 0x26, 0x5a, 0x81, 0x1f, 0x71, 0x8d, - 0xf0, 0x22, 0xf7, 0x9c, 0xed, 0x37, 0xc3, 0x0c, 0x55, 0x12, 0x78, 0x9c, 0x2a, 0x81, 0x5e, 0x84, - 0x61, 0x11, 0x7a, 0xa8, 0xe2, 0xfb, 0x0d, 0xa1, 0x84, 0x52, 0xce, 0xa4, 0xd5, 0x18, 0x85, 0x75, - 0x3a, 0xad, 0x18, 0x53, 0x33, 0x0f, 0x66, 0x16, 0xe3, 0xaa, 0x66, 0x8d, 0x2e, 0x11, 0xa6, 0x7c, - 0xa8, 0xa7, 0x30, 0xe5, 0xb1, 0x5a, 0xae, 0xd4, 0xb3, 0xd5, 0x13, 0xba, 0x2a, 0xb2, 0xbe, 0xda, - 0x07, 0x53, 0x62, 0xe2, 0x3c, 0xe8, 0xe9, 0x72, 0x2d, 0x3d, 0x5d, 0x0e, 0x43, 0x71, 0xf7, 0xde, - 0x9c, 0x39, 0xea, 0x39, 0xf3, 0xc3, 0x16, 0x98, 0x92, 0x1a, 0xfa, 0xdf, 0x73, 0xb3, 0x26, 0xbe, - 0x98, 0x2b, 0xf9, 0xc5, 0x31, 0x8c, 0xdf, 0x59, 0xfe, 0x44, 0xfb, 0x5f, 0x5b, 0xf0, 0x48, 0x57, - 0x8e, 0x68, 0x09, 0x4a, 0x4c, 0x9c, 0xd4, 0x2e, 0x7a, 0x4f, 0x28, 0xcf, 0x7a, 0x89, 0xc8, 0x91, - 0x6e, 0xe3, 0x92, 0x68, 0x29, 0x95, 0x9e, 0xf2, 0xc9, 0x8c, 0xf4, 0x94, 0xc7, 0x8d, 0xee, 0xb9, - 0xcf, 0xfc, 0x94, 0x5f, 0xa4, 0x27, 0x8e, 0xf1, 0xaa, 0x0d, 0x7d, 0xc0, 0x50, 0x3a, 0xda, 0x09, - 0xa5, 0x23, 0x32, 0xa9, 0xb5, 0x33, 0xe4, 0x23, 0x30, 0xc1, 0x62, 0x12, 0xb2, 0x77, 0x1e, 0xe2, - 0xbd, 0x5d, 0x21, 0xf6, 0xe5, 0x5e, 0x4d, 0xe0, 0x70, 0x8a, 0xda, 0xfe, 0xb3, 0x22, 0x0c, 0xf0, - 0xe5, 0x77, 0x04, 0xd7, 0xcb, 0xa7, 0xa1, 0xe4, 0x36, 0x9b, 0x6d, 0x9e, 0x71, 0xb0, 0x3f, 0xf6, - 0x0c, 0x5e, 0x91, 0x40, 0x1c, 0xe3, 0xd1, 0xb2, 0xd0, 0x77, 0x77, 0x08, 0x7b, 0xcc, 0x1b, 0x3e, - 0xbb, 0xe8, 0x44, 0x0e, 0x97, 0x95, 0xd4, 0x39, 0x1b, 0x6b, 0xc6, 0xd1, 0x27, 0x01, 0xc2, 0x28, - 0x70, 0xbd, 0x2d, 0x0a, 0x13, 0xb1, 0xf1, 0x9f, 0xea, 0xc0, 0xad, 0xaa, 0x88, 0x39, 0xcf, 0x78, - 0xcf, 0x51, 0x08, 0xac, 0x71, 0x44, 0xb3, 0xc6, 0x49, 0x3f, 0x93, 0x18, 0x3b, 0xe0, 0x5c, 0xe3, - 0x31, 0x9b, 0xf9, 0x20, 0x94, 0x14, 0xf3, 0x6e, 0xda, 0xaf, 0x11, 0x5d, 0x2c, 0xfa, 0x30, 0x8c, - 0x27, 0xda, 0x76, 0x20, 0xe5, 0xd9, 0x2f, 0x5b, 0x30, 0xce, 0x1b, 0xb3, 0xe4, 0xed, 0x8a, 0xd3, - 0xe0, 0x0e, 0x1c, 0x6b, 0x64, 0xec, 0xca, 0x62, 0xf8, 0x7b, 0xdf, 0xc5, 0x95, 0xb2, 0x2c, 0x0b, - 0x8b, 0x33, 0xeb, 0x40, 0xe7, 0xe9, 0x8a, 0xa3, 0xbb, 0xae, 0xd3, 0x10, 0xf1, 0x0d, 0x46, 0xf8, - 0x6a, 0xe3, 0x30, 0xac, 0xb0, 0xf6, 0x1f, 0x59, 0x30, 0xc9, 0x5b, 0x7e, 0x85, 0xec, 0xa9, 0xbd, - 0xe9, 0x5b, 0xd9, 0x76, 0x91, 0xeb, 0xb6, 0x90, 0x93, 0xeb, 0x56, 0xff, 0xb4, 0x62, 0xc7, 0x4f, - 0xfb, 0x8a, 0x05, 0x62, 0x86, 0x1c, 0x81, 0x3e, 0xe3, 0xbb, 0x4c, 0x7d, 0xc6, 0x4c, 0xfe, 0x22, - 0xc8, 0x51, 0x64, 0xfc, 0xb5, 0x05, 0x13, 0x9c, 0x20, 0xb6, 0xd5, 0x7f, 0x4b, 0xc7, 0x61, 0xde, - 0xfc, 0xa2, 0x4c, 0xe7, 0xcb, 0x2b, 0x64, 0x6f, 0xc3, 
0xaf, 0x38, 0xd1, 0x76, 0xf6, 0x47, 0x19, - 0x83, 0xd5, 0xd7, 0x71, 0xb0, 0xea, 0x72, 0x01, 0x19, 0xa9, 0xe0, 0xba, 0x04, 0x08, 0x38, 0x68, - 0x2a, 0x38, 0xfb, 0xcf, 0x2d, 0x40, 0xbc, 0x1a, 0x43, 0x70, 0xa3, 0xe2, 0x10, 0x83, 0x6a, 0x07, - 0x5d, 0xbc, 0x35, 0x29, 0x0c, 0xd6, 0xa8, 0x0e, 0xa5, 0x7b, 0x12, 0x0e, 0x17, 0xc5, 0xee, 0x0e, - 0x17, 0x07, 0xe8, 0xd1, 0x7f, 0x3e, 0x00, 0xc9, 0x97, 0x7d, 0xe8, 0x3a, 0x8c, 0xd4, 0x9c, 0x96, - 0x73, 0xd3, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x4e, 0xde, 0x58, 0x0b, 0x1a, 0x9d, 0x30, 0x91, 0x6b, - 0x10, 0x6c, 0xf0, 0x41, 0xb3, 0x00, 0xad, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x8b, 0xa9, 0x5d, 0x58, - 0x44, 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc0, 0x61, 0x14, - 0xe0, 0xc8, 0xc2, 0x28, 0xf4, 0x1d, 0x28, 0x8c, 0xc2, 0xd0, 0x81, 0xc3, 0x28, 0xf4, 0xf7, 0x14, - 0x46, 0x01, 0xc3, 0x09, 0x29, 0x7b, 0xd2, 0xff, 0xcb, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, - 0x33, 0x73, 0x77, 0xbf, 0x7c, 0x02, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0xa3, 0x30, 0xed, 0x34, - 0x1a, 0xfe, 0x2d, 0x35, 0xa8, 0x4b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0xc8, 0x20, 0xe3, 0x7a, 0xea, - 0xee, 0x7e, 0x79, 0x7a, 0x2e, 0x87, 0x06, 0xe7, 0x96, 0x46, 0xaf, 0x42, 0xa9, 0x15, 0xf8, 0xb5, - 0x35, 0xed, 0xf9, 0xf1, 0x19, 0xda, 0x81, 0x15, 0x09, 0xbc, 0xb7, 0x5f, 0x1e, 0x55, 0x7f, 0xd8, - 0x81, 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x7c, 0xa8, 0x71, 0x11, 0x76, 0x60, 0xaa, 0x4a, 0x02, - 0xd7, 0x69, 0xb8, 0x77, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x0d, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xa7, - 0x50, 0xc4, 0x5a, 0x36, 0x2e, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0xc1, 0xa0, 0x78, 0xc9, - 0x77, 0x04, 0x52, 0xe3, 0x9c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, - 0x25, 0x61, 0x8e, 0x78, 0xa4, 0x13, 0x93, 0xce, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, - 0x4d, 0xf9, 0x83, 0xef, 0x82, 0x75, 0x18, 0x0c, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, - 0x20, 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x00, 0x1f, 0x4b, - 0x77, 0x7b, 0x75, 0xdf, 0x77, 0x18, 0xaf, 0xee, 0xed, 0xaf, 0xb3, 0x93, 0x53, 0x87, 0x1f, 0x81, - 0x50, 0x75, 0xc9, 0x3c, 0x63, 0xed, 0x0e, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x45, 0x0b, - 0x4e, 0x67, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x03, 0x43, 0x4e, 0xbb, 0xee, 0xaa, 0xb5, 0xac, 0x99, - 0x26, 0xe7, 0x04, 0x1c, 0x2b, 0x0a, 0xb4, 0x00, 0x93, 0xe4, 0x76, 0xcb, 0xe5, 0x86, 0x5c, 0xdd, - 0xf9, 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x25, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, - 0x44, 0xfd, 0xbc, 0x05, 0xc3, 0xea, 0x55, 0xef, 0x03, 0xef, 0xed, 0x8f, 0x98, 0xbd, 0xfd, 0x70, - 0x87, 0xde, 0xce, 0xe9, 0xe6, 0x3f, 0x28, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x41, 0x82, 0xbb, - 0xff, 0x87, 0x13, 0x17, 0x61, 0xd8, 0x69, 0xb5, 0x24, 0x42, 0x7a, 0xc0, 0xb1, 0xc0, 0xf2, 0x31, - 0x18, 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x2d, 0x12, - 0x51, 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0xda, 0x91, 0xdb, 0x98, 0x75, 0xbd, 0x28, 0x8c, 0x82, - 0xd9, 0x15, 0x2f, 0xba, 0x1a, 0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, - 0xc1, 0x82, 0xd5, 0xd1, 0x6f, 0xba, 0x52, 0xac, 0x0b, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, - 0xc3, 0xfa, 0xf4, 0x60, 0xe1, 0xc5, 0x7e, 0x72, 0x44, 0x8d, 0x06, 0x33, 0x8a, 0x2e, 0xea, 0x41, - 0xcc, 0x3a, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x3c, 0xe5, 0x1e, - 0xf3, 0x6c, 0x97, 0x53, 0xe3, 0x00, 0x0e, 0x31, 0x2c, 0xcb, 0x14, 0xcb, 0xc1, 0xb3, 0x52, 0x11, - 0xeb, 0x42, 0xcb, 0x32, 0x25, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 
0x4e, 0xa3, 0x38, - 0x18, 0xb1, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x08, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x9c, 0x50, - 0x28, 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x17, 0x61, 0x98, 0xdc, 0x8e, 0x48, 0xe0, 0x39, 0x0d, 0x5a, - 0x43, 0x7f, 0x1c, 0x3f, 0x73, 0x29, 0x06, 0x63, 0x9d, 0x06, 0x6d, 0xc0, 0x78, 0xc8, 0xf5, 0x6c, - 0x2a, 0x04, 0x3e, 0xd7, 0x57, 0x3e, 0xa5, 0xde, 0x53, 0x9b, 0xe8, 0x7b, 0x0c, 0xc4, 0x77, 0x27, - 0x19, 0x65, 0x22, 0xc9, 0x02, 0xbd, 0x06, 0x63, 0x0d, 0xdf, 0xa9, 0xcf, 0x3b, 0x0d, 0xc7, 0xab, - 0xb1, 0xfe, 0x19, 0x32, 0x73, 0x95, 0xaf, 0x1a, 0x58, 0x9c, 0xa0, 0xa6, 0xc2, 0x9b, 0x0e, 0x11, - 0x61, 0xda, 0x1c, 0x6f, 0x8b, 0x84, 0xd3, 0x25, 0xf6, 0x55, 0x4c, 0x78, 0x5b, 0xcd, 0xa1, 0xc1, - 0xb9, 0xa5, 0xd1, 0x4b, 0x30, 0x22, 0x3f, 0x5f, 0x0b, 0xca, 0x12, 0x3f, 0x89, 0xd1, 0x70, 0xd8, - 0xa0, 0x44, 0x21, 0x1c, 0x97, 0xff, 0x37, 0x02, 0x67, 0x73, 0xd3, 0xad, 0x89, 0x48, 0x05, 0xfc, - 0xf9, 0xf0, 0x87, 0xe5, 0x5b, 0xc5, 0xa5, 0x2c, 0xa2, 0x7b, 0xfb, 0xe5, 0x53, 0xa2, 0xd7, 0x32, - 0xf1, 0x38, 0x9b, 0x37, 0x5a, 0x83, 0xa9, 0x6d, 0xe2, 0x34, 0xa2, 0xed, 0x85, 0x6d, 0x52, 0xdb, - 0x91, 0x0b, 0x8e, 0x85, 0x79, 0xd1, 0x9e, 0x8e, 0x5c, 0x4e, 0x93, 0xe0, 0xac, 0x72, 0xe8, 0x2d, - 0x98, 0x6e, 0xb5, 0x6f, 0x36, 0xdc, 0x70, 0x7b, 0xdd, 0x8f, 0x98, 0x13, 0xd2, 0x5c, 0xbd, 0x1e, - 0x90, 0x90, 0xbf, 0x2e, 0x65, 0x47, 0xaf, 0x0c, 0xa4, 0x53, 0xc9, 0xa1, 0xc3, 0xb9, 0x1c, 0xd0, - 0x1d, 0x38, 0x9e, 0x98, 0x08, 0x22, 0x22, 0xc6, 0x58, 0x7e, 0x02, 0x9c, 0x6a, 0x56, 0x01, 0x11, - 0x5c, 0x26, 0x0b, 0x85, 0xb3, 0xab, 0x40, 0x2f, 0x03, 0xb8, 0xad, 0x65, 0xa7, 0xe9, 0x36, 0xe8, - 0x55, 0x71, 0x8a, 0xcd, 0x11, 0x7a, 0x6d, 0x80, 0x95, 0x8a, 0x84, 0xd2, 0xbd, 0x59, 0xfc, 0xdb, - 0xc3, 0x1a, 0x35, 0x5a, 0x85, 0x31, 0xf1, 0x6f, 0x4f, 0x0c, 0xe9, 0xa4, 0xca, 0x95, 0x38, 0x26, - 0x4b, 0xa8, 0x71, 0x4c, 0x40, 0x70, 0xa2, 0x2c, 0xda, 0x82, 0xd3, 0x32, 0x51, 0xa3, 0x3e, 0x3f, - 0xe5, 0x18, 0x84, 0x2c, 0xeb, 0xcc, 0x10, 0x7f, 0x95, 0x32, 0xd7, 0x89, 0x10, 0x77, 0xe6, 0x43, - 0xcf, 0x75, 0x7d, 0x9a, 0xf3, 0x37, 0xc7, 0xc7, 0xe3, 0x88, 0x83, 0xab, 0x49, 0x24, 0x4e, 0xd3, - 0x23, 0x1f, 0x8e, 0xbb, 0x5e, 0xd6, 0xac, 0x3e, 0xc1, 0x18, 0x7d, 0x88, 0x3f, 0xb7, 0xee, 0x3c, - 0xa3, 0x33, 0xf1, 0x38, 0x9b, 0xef, 0x3b, 0xf3, 0xfb, 0xfb, 0x43, 0x8b, 0x96, 0xd6, 0xa4, 0x73, - 0xf4, 0x29, 0x18, 0xd1, 0x3f, 0x4a, 0x48, 0x1a, 0xe7, 0xb2, 0x85, 0x57, 0x6d, 0x4f, 0xe0, 0xb2, - 0xbd, 0x5a, 0xf7, 0x3a, 0x0e, 0x1b, 0x1c, 0x51, 0x2d, 0x23, 0xb6, 0xc1, 0x85, 0xde, 0x24, 0x99, - 0xde, 0xdd, 0xde, 0x08, 0x64, 0x4f, 0x77, 0xb4, 0x0a, 0x43, 0xb5, 0x86, 0x4b, 0xbc, 0x68, 0xa5, - 0xd2, 0x29, 0x7a, 0xe3, 0x82, 0xa0, 0x11, 0xeb, 0x47, 0x24, 0x90, 0xe1, 0x30, 0xac, 0x38, 0xd8, - 0x2f, 0xc1, 0x70, 0xb5, 0x41, 0x48, 0x8b, 0x3f, 0xdf, 0x41, 0x4f, 0xb2, 0xdb, 0x04, 0x93, 0x07, - 0x2d, 0x26, 0x0f, 0xea, 0x17, 0x05, 0x26, 0x09, 0x4a, 0xbc, 0xfd, 0x5b, 0x05, 0x28, 0x77, 0xc9, - 0x63, 0x94, 0x30, 0x60, 0x59, 0x3d, 0x19, 0xb0, 0xe6, 0x60, 0x3c, 0xfe, 0xa7, 0xeb, 0xc6, 0x94, - 0x0f, 0xec, 0x75, 0x13, 0x8d, 0x93, 0xf4, 0x3d, 0x3f, 0x67, 0xd0, 0x6d, 0x60, 0x7d, 0x5d, 0x1f, - 0xe4, 0x18, 0xb6, 0xef, 0xfe, 0xde, 0x2f, 0xcc, 0xb9, 0x76, 0x4c, 0xfb, 0xeb, 0x05, 0x38, 0xae, - 0xba, 0xf0, 0x3b, 0xb7, 0xe3, 0xae, 0xa5, 0x3b, 0xee, 0x10, 0xac, 0xc0, 0xf6, 0x55, 0x18, 0xe0, - 0x81, 0x2c, 0x7b, 0x10, 0xd4, 0x1f, 0x35, 0xe3, 0x6b, 0x2b, 0xd9, 0xd0, 0x88, 0xb1, 0xfd, 0x03, - 0x16, 0x8c, 0x27, 0xde, 0xc5, 0x21, 0xac, 0x3d, 0x9e, 0xbe, 0x1f, 0x61, 0x3a, 0x4b, 0x4c, 0x3f, - 0x0b, 0x7d, 0xdb, 0x7e, 0x18, 0x25, 0x5d, 0x44, 0x2e, 0xfb, 0x61, 0x84, 0x19, 0xc6, 0xfe, 0x63, - 0x0b, 
-	(previous gzipped FileDescriptorProto bytes, removed by regeneration)
+	// 14822 bytes of a gzipped FileDescriptorProto
+	(regenerated descriptor bytes, beginning with the gzip header 0x1f, 0x8b, continue below)
0xc0, 0xe1, 0x14, 0x35, 0xcf, 0x90, 0xe2, 0xac, 0x37, 0x88, 0xd8, 0xfa, 0x96, 0x5d, 0x6f, 0x3b, + 0x9c, 0x3c, 0xcf, 0xf6, 0x07, 0x91, 0x21, 0x25, 0x89, 0xc5, 0x19, 0x25, 0xd0, 0x1a, 0x8c, 0x35, + 0x03, 0x42, 0x76, 0x98, 0xa0, 0x2f, 0xce, 0xb3, 0x32, 0x8f, 0xfb, 0x41, 0x5b, 0x52, 0x49, 0xe0, + 0x0e, 0x32, 0x60, 0x38, 0xc5, 0x01, 0xdd, 0x81, 0x01, 0x7f, 0x97, 0x04, 0x5b, 0xc4, 0xa9, 0x4f, + 0x5e, 0x68, 0xf3, 0x12, 0x47, 0x1c, 0x6e, 0x37, 0x04, 0x6d, 0xc2, 0xdb, 0x43, 0x82, 0x3b, 0x7b, + 0x7b, 0xc8, 0xca, 0xd0, 0xff, 0x61, 0xc1, 0x19, 0x69, 0x9c, 0xa9, 0x36, 0x69, 0xaf, 0xcf, 0xf9, + 0x5e, 0x18, 0x05, 0x3c, 0x52, 0xc5, 0xa3, 0xf9, 0xd1, 0x1b, 0xd6, 0x72, 0x0a, 0x29, 0x45, 0xf4, + 0x99, 0x3c, 0x8a, 0x10, 0xe7, 0xd7, 0x48, 0xaf, 0xa6, 0x21, 0x89, 0xe4, 0x66, 0x34, 0x13, 0x2e, + 0xbe, 0x31, 0xbf, 0x3a, 0xf9, 0x18, 0x0f, 0xb3, 0x41, 0x17, 0x43, 0x35, 0x89, 0xc4, 0x69, 0x7a, + 0x74, 0x05, 0x0a, 0x7e, 0x38, 0xf9, 0x78, 0x9b, 0x5c, 0xba, 0x7e, 0xfd, 0x46, 0x95, 0x7b, 0xfd, + 0xdd, 0xa8, 0xe2, 0x82, 0x1f, 0xca, 0x2c, 0x25, 0xf4, 0x3e, 0x16, 0x4e, 0x3e, 0xc1, 0xd5, 0x96, + 0x32, 0x4b, 0x09, 0x03, 0xe2, 0x18, 0x8f, 0xb6, 0x60, 0x34, 0x34, 0xee, 0xbd, 0xe1, 0xe4, 0x45, + 0xd6, 0x53, 0x4f, 0xe4, 0x0d, 0x9a, 0x41, 0xad, 0xa5, 0x0f, 0x30, 0xb9, 0xe0, 0x24, 0x5b, 0xbe, + 0xba, 0xb4, 0x9b, 0x77, 0x38, 0xf9, 0x64, 0x87, 0xd5, 0xa5, 0x11, 0xeb, 0xab, 0x4b, 0xe7, 0x81, + 0x13, 0x3c, 0xa7, 0xbe, 0x03, 0xc6, 0x53, 0xe2, 0xd2, 0x61, 0x3c, 0xdc, 0xa7, 0xb6, 0x61, 0xd8, + 0x98, 0x92, 0x0f, 0xd5, 0xbb, 0xe2, 0xb7, 0x4b, 0x50, 0x52, 0x56, 0x6f, 0x74, 0xd9, 0x74, 0xa8, + 0x38, 0x93, 0x74, 0xa8, 0x18, 0xa8, 0xf8, 0x75, 0xc3, 0x87, 0x62, 0x2d, 0x23, 0x18, 0x63, 0xde, + 0x06, 0xd8, 0xfd, 0x23, 0x15, 0xcd, 0x94, 0x50, 0xec, 0xda, 0x33, 0xa3, 0xa7, 0xad, 0x75, 0xe2, + 0x2a, 0x8c, 0x7b, 0x3e, 0x93, 0xd1, 0x49, 0x5d, 0x0a, 0x60, 0x4c, 0xce, 0x2a, 0xe9, 0xd1, 0x8d, + 0x12, 0x04, 0x38, 0x5d, 0x86, 0x56, 0xc8, 0x05, 0xa5, 0xa4, 0x39, 0x84, 0xcb, 0x51, 0x58, 0x60, + 0xe9, 0xdd, 0x90, 0xff, 0x0a, 0x27, 0xc7, 0xf2, 0xef, 0x86, 0xbc, 0x50, 0x52, 0x18, 0x0b, 0xa5, + 0x30, 0xc6, 0xb4, 0xff, 0x4d, 0xbf, 0xbe, 0x54, 0x11, 0x62, 0xbe, 0x16, 0x49, 0xb8, 0xbe, 0x54, + 0xc1, 0x1c, 0x87, 0x66, 0xa0, 0x8f, 0xfd, 0x08, 0x27, 0x87, 0xf2, 0xa3, 0xe1, 0xb0, 0x12, 0x5a, + 0x96, 0x34, 0x56, 0x00, 0x8b, 0x82, 0x4c, 0xbb, 0x4b, 0xef, 0x46, 0x4c, 0xbb, 0xdb, 0xff, 0x80, + 0xda, 0x5d, 0xc9, 0x00, 0xc7, 0xbc, 0xd0, 0x5d, 0x38, 0x69, 0xdc, 0x47, 0xd5, 0xab, 0x1d, 0xc8, + 0x37, 0xfc, 0x26, 0x88, 0x67, 0xcf, 0x89, 0x46, 0x9f, 0x5c, 0xca, 0xe2, 0x84, 0xb3, 0x2b, 0x40, + 0x0d, 0x18, 0xaf, 0xa5, 0x6a, 0x1d, 0xe8, 0xbe, 0x56, 0x35, 0x2f, 0xd2, 0x35, 0xa6, 0x19, 0xa3, + 0x57, 0x61, 0xe0, 0x6d, 0x3f, 0x64, 0x47, 0xa4, 0xb8, 0x9a, 0xc8, 0x70, 0x0e, 0x03, 0x6f, 0xdc, + 0xa8, 0x32, 0xf8, 0xc1, 0x7e, 0x79, 0xb0, 0xe2, 0xd7, 0xe5, 0x5f, 0xac, 0x0a, 0xa0, 0xef, 0xb7, + 0x60, 0x2a, 0x7d, 0xe1, 0x55, 0x8d, 0x1e, 0xee, 0xbe, 0xd1, 0xb6, 0xa8, 0x74, 0x6a, 0x21, 0x97, + 0x1d, 0x6e, 0x53, 0x15, 0xfa, 0x10, 0x5d, 0x4f, 0xa1, 0x7b, 0x8f, 0x88, 0x14, 0xb3, 0x8f, 0xc6, + 0xeb, 0x89, 0x42, 0x0f, 0xf6, 0xcb, 0xa3, 0x7c, 0x67, 0x74, 0xef, 0xc9, 0xe7, 0x4d, 0xa2, 0x00, + 0xfa, 0x4e, 0x38, 0x19, 0xa4, 0x35, 0xa8, 0x44, 0x0a, 0xe1, 0x4f, 0x77, 0xb3, 0xcb, 0x26, 0x07, + 0x1c, 0x67, 0x31, 0xc4, 0xd9, 0xf5, 0xd8, 0xbf, 0x62, 0x31, 0xfd, 0xb6, 0x68, 0x16, 0x09, 0x5b, + 0x8d, 0xe3, 0x48, 0x6c, 0xbd, 0x60, 0xd8, 0x8e, 0x1f, 0xd8, 0xb1, 0xe8, 0x1f, 0x59, 0xcc, 0xb1, + 0xe8, 0x18, 0x5f, 0x31, 0xbd, 0x01, 0x03, 0x91, 0x4c, 0x38, 0xde, 0x26, 0x17, 0xb7, 0xd6, 0x28, + 0xe6, 0x5c, 0xa5, 0x2e, 
0x39, 0x2a, 0xb7, 0xb8, 0x62, 0x63, 0xff, 0x7d, 0x3e, 0x02, 0x12, 0x73, + 0x0c, 0x26, 0xba, 0x79, 0xd3, 0x44, 0x57, 0xee, 0xf0, 0x05, 0x39, 0xa6, 0xba, 0xbf, 0x67, 0xb6, + 0x9b, 0x29, 0xf7, 0xde, 0xed, 0x1e, 0x6d, 0xf6, 0x17, 0x2c, 0x80, 0x38, 0xc8, 0x7c, 0x17, 0x29, + 0x25, 0x5f, 0xa6, 0xd7, 0x1a, 0x3f, 0xf2, 0x6b, 0x7e, 0x43, 0x18, 0x28, 0xce, 0xc6, 0x56, 0x42, + 0x0e, 0x3f, 0xd0, 0x7e, 0x63, 0x45, 0x8d, 0xca, 0x32, 0xa4, 0x65, 0x31, 0xb6, 0x5b, 0x1b, 0xe1, + 0x2c, 0xbf, 0x64, 0xc1, 0x89, 0x2c, 0x97, 0x78, 0x7a, 0x49, 0xe6, 0x6a, 0x4e, 0xe5, 0x6d, 0xa8, + 0x46, 0xf3, 0x96, 0x80, 0x63, 0x45, 0xd1, 0x75, 0xae, 0xce, 0xc3, 0x45, 0x77, 0xbf, 0x01, 0xc3, + 0x95, 0x80, 0x68, 0xf2, 0xc5, 0x6b, 0x3c, 0x4c, 0x0a, 0x6f, 0xcf, 0xb3, 0x87, 0x0e, 0x91, 0x62, + 0x7f, 0xb9, 0x00, 0x27, 0xb8, 0xd3, 0xce, 0xcc, 0xae, 0xef, 0xd6, 0x2b, 0x7e, 0x5d, 0x3c, 0x64, + 0x7c, 0x13, 0x86, 0x9a, 0x9a, 0x6e, 0xba, 0x5d, 0xa4, 0x62, 0x5d, 0x87, 0x1d, 0x6b, 0xd3, 0x74, + 0x28, 0x36, 0x78, 0xa1, 0x3a, 0x0c, 0x91, 0x5d, 0xb7, 0xa6, 0x3c, 0x3f, 0x0a, 0x87, 0x3e, 0xa4, + 0x55, 0x2d, 0x0b, 0x1a, 0x1f, 0x6c, 0x70, 0x7d, 0x08, 0x19, 0xf4, 0xed, 0x1f, 0xb5, 0xe0, 0x74, + 0x4e, 0x5c, 0x63, 0x5a, 0xdd, 0x1d, 0xe6, 0x1e, 0x25, 0xa6, 0xad, 0xaa, 0x8e, 0x3b, 0x4d, 0x61, + 0x81, 0x45, 0x1f, 0x03, 0xe0, 0x4e, 0x4f, 0xc4, 0xab, 0x75, 0x0c, 0x00, 0x6b, 0xc4, 0xae, 0xd4, + 0xc2, 0x10, 0xca, 0xf2, 0x58, 0xe3, 0x65, 0x7f, 0xa9, 0x07, 0x7a, 0x99, 0x93, 0x0d, 0xaa, 0x40, + 0xff, 0x16, 0xcf, 0x54, 0xd5, 0x76, 0xdc, 0x28, 0xad, 0x4c, 0x7e, 0x15, 0x8f, 0x9b, 0x06, 0xc5, + 0x92, 0x0d, 0x5a, 0x81, 0x09, 0x9e, 0x30, 0xac, 0x31, 0x4f, 0x1a, 0xce, 0x9e, 0x54, 0xfb, 0xf2, + 0x1c, 0xd8, 0x4a, 0xfd, 0xbd, 0x94, 0x26, 0xc1, 0x59, 0xe5, 0xd0, 0x6b, 0x30, 0x42, 0xaf, 0xe1, + 0x7e, 0x2b, 0x92, 0x9c, 0x78, 0xaa, 0x30, 0x75, 0x33, 0x59, 0x33, 0xb0, 0x38, 0x41, 0x8d, 0x5e, + 0x85, 0xe1, 0x66, 0x4a, 0xc1, 0xdd, 0x1b, 0x6b, 0x82, 0x4c, 0xa5, 0xb6, 0x49, 0xcb, 0xbc, 0xe2, + 0x5b, 0xec, 0x0d, 0xc0, 0xda, 0x56, 0x40, 0xc2, 0x2d, 0xbf, 0x51, 0x67, 0x12, 0x70, 0xaf, 0xe6, + 0x15, 0x9f, 0xc0, 0xe3, 0x54, 0x09, 0xca, 0x65, 0xc3, 0x71, 0x1b, 0xad, 0x80, 0xc4, 0x5c, 0xfa, + 0x4c, 0x2e, 0x8b, 0x09, 0x3c, 0x4e, 0x95, 0xe8, 0xac, 0xb9, 0xef, 0x3f, 0x1a, 0xcd, 0xbd, 0xfd, + 0xd3, 0x05, 0x30, 0x86, 0xf6, 0xdb, 0x37, 0x85, 0x19, 0xfd, 0xb2, 0xcd, 0xa0, 0x59, 0x13, 0x0e, + 0x65, 0x99, 0x5f, 0x16, 0xe7, 0x2f, 0xe6, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8a, 0xae, 0xf1, 0x93, + 0x95, 0xc0, 0xa7, 0x87, 0x9c, 0x0c, 0xa4, 0xa7, 0x1e, 0x9f, 0xf4, 0xcb, 0x20, 0x03, 0x6d, 0x42, + 0xce, 0x0a, 0xf7, 0x7c, 0xce, 0xc1, 0xf0, 0xbd, 0xaa, 0x8a, 0x68, 0x1f, 0x92, 0x0b, 0xba, 0x02, + 0x83, 0x22, 0x2f, 0x15, 0x7b, 0x23, 0xc1, 0x17, 0x13, 0xf3, 0x15, 0x9b, 0x8f, 0xc1, 0x58, 0xa7, + 0xb1, 0x7f, 0xa0, 0x00, 0x13, 0x19, 0x8f, 0xdc, 0xf8, 0x31, 0xb2, 0xe9, 0x86, 0x91, 0x4a, 0x91, + 0xac, 0x1d, 0x23, 0x1c, 0x8e, 0x15, 0x05, 0xdd, 0xab, 0xf8, 0x41, 0x95, 0x3c, 0x9c, 0xc4, 0x23, + 0x12, 0x81, 0x3d, 0x64, 0xb2, 0xe1, 0x0b, 0xd0, 0xd3, 0x0a, 0x89, 0x0c, 0x16, 0xad, 0x8e, 0x6d, + 0x66, 0xd6, 0x66, 0x18, 0x7a, 0x05, 0xdc, 0x54, 0x16, 0x62, 0xed, 0x0a, 0xc8, 0x6d, 0xc4, 0x1c, + 0x47, 0x1b, 0x17, 0x11, 0xcf, 0xf1, 0x22, 0x71, 0x51, 0x8c, 0xa3, 0x9e, 0x32, 0x28, 0x16, 0x58, + 0xfb, 0x8b, 0x45, 0x38, 0x93, 0xfb, 0xec, 0x95, 0x36, 0x7d, 0xc7, 0xf7, 0xdc, 0xc8, 0x57, 0x4e, + 0x78, 0x3c, 0xd2, 0x29, 0x69, 0x6e, 0xad, 0x08, 0x38, 0x56, 0x14, 0xe8, 0x22, 0xf4, 0x32, 0xa5, + 0x78, 0x2a, 0x59, 0xf4, 0xec, 0x3c, 0x0f, 0x7d, 0xc7, 0xd1, 0x5d, 0xe7, 0xf7, 0x7f, 0x8c, 0x4a, + 0x30, 0x7e, 0x23, 0x79, 0xa0, 0xd0, 0xe6, 0xfa, 
0x7e, 0x03, 0x33, 0x24, 0x7a, 0x42, 0xf4, 0x57, + 0xc2, 0xeb, 0x0c, 0x3b, 0x75, 0x3f, 0xd4, 0x3a, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0x7b, 0x81, 0xeb, + 0x6d, 0x26, 0xbd, 0x11, 0xaf, 0x73, 0x30, 0x96, 0x78, 0x33, 0x6f, 0x69, 0xff, 0x51, 0x27, 0xe6, + 0x1f, 0xe8, 0x28, 0x9e, 0xfc, 0x50, 0x11, 0x46, 0xf1, 0xec, 0xfc, 0x7b, 0x03, 0x71, 0x33, 0x3d, + 0x10, 0x47, 0x9d, 0x98, 0xbf, 0xf3, 0x68, 0xfc, 0xa2, 0x05, 0xa3, 0x2c, 0x3b, 0x96, 0x88, 0x59, + 0xe1, 0xfa, 0xde, 0x31, 0x5c, 0x05, 0x1e, 0x83, 0xde, 0x80, 0x56, 0x9a, 0xcc, 0x12, 0xcd, 0x5a, + 0x82, 0x39, 0x0e, 0x9d, 0x85, 0x1e, 0xd6, 0x04, 0x3a, 0x78, 0x43, 0x7c, 0x0b, 0x9e, 0x77, 0x22, + 0x07, 0x33, 0x28, 0x0b, 0xfc, 0x86, 0x49, 0xb3, 0xe1, 0xf2, 0x46, 0xc7, 0x2e, 0x0b, 0xef, 0x8e, + 0x80, 0x18, 0x99, 0x4d, 0x7b, 0x67, 0x81, 0xdf, 0xb2, 0x59, 0xb6, 0xbf, 0x66, 0xff, 0x79, 0x01, + 0xce, 0x67, 0x96, 0xeb, 0x3a, 0xf0, 0x5b, 0xfb, 0xd2, 0x0f, 0x33, 0xff, 0x51, 0xf1, 0x18, 0x7d, + 0xbd, 0x7b, 0xba, 0x95, 0xfe, 0x7b, 0xbb, 0x88, 0xc7, 0x96, 0xd9, 0x65, 0xef, 0x92, 0x78, 0x6c, + 0x99, 0x6d, 0xcb, 0x51, 0x13, 0xfc, 0x75, 0x21, 0xe7, 0x5b, 0x98, 0xc2, 0xe0, 0x12, 0xdd, 0x67, + 0x18, 0x32, 0x94, 0x97, 0x70, 0xbe, 0xc7, 0x70, 0x18, 0x56, 0x58, 0x34, 0x03, 0xa3, 0x3b, 0xae, + 0x47, 0x37, 0x9f, 0x3d, 0x53, 0x14, 0x57, 0xb6, 0x8c, 0x15, 0x13, 0x8d, 0x93, 0xf4, 0xc8, 0xd5, + 0x62, 0xb5, 0xf1, 0xaf, 0x7b, 0xf5, 0x50, 0xab, 0x6e, 0xda, 0x74, 0xe7, 0x50, 0xbd, 0x98, 0x11, + 0xb7, 0x6d, 0x45, 0xd3, 0x13, 0x15, 0xbb, 0xd7, 0x13, 0x0d, 0x65, 0xeb, 0x88, 0xa6, 0x5e, 0x85, + 0xe1, 0x07, 0xb6, 0x8d, 0xd8, 0x5f, 0x2f, 0xc2, 0x23, 0x6d, 0x96, 0x3d, 0xdf, 0xeb, 0x8d, 0x31, + 0xd0, 0xf6, 0xfa, 0xd4, 0x38, 0x54, 0xe0, 0xc4, 0x46, 0xab, 0xd1, 0xd8, 0x63, 0x4f, 0xa0, 0x48, + 0x5d, 0x52, 0x08, 0x99, 0x52, 0x2a, 0x47, 0x4e, 0x2c, 0x66, 0xd0, 0xe0, 0xcc, 0x92, 0xf4, 0x8a, + 0x45, 0x4f, 0x92, 0x3d, 0xc5, 0x2a, 0x71, 0xc5, 0xc2, 0x3a, 0x12, 0x9b, 0xb4, 0xe8, 0x2a, 0x8c, + 0x3b, 0xbb, 0x8e, 0xcb, 0x03, 0xde, 0x4b, 0x06, 0xfc, 0x8e, 0xa5, 0x74, 0xd1, 0x33, 0x49, 0x02, + 0x9c, 0x2e, 0x83, 0x5e, 0x07, 0xe4, 0xaf, 0xb3, 0x87, 0x12, 0xf5, 0xab, 0xc4, 0x13, 0x56, 0x77, + 0x36, 0x76, 0xc5, 0x78, 0x4b, 0xb8, 0x91, 0xa2, 0xc0, 0x19, 0xa5, 0x12, 0x81, 0xc9, 0xfa, 0xf2, + 0x03, 0x93, 0xb5, 0xdf, 0x17, 0x3b, 0xa6, 0xde, 0xba, 0x02, 0xc3, 0x87, 0x74, 0xff, 0xb5, 0xff, + 0x8d, 0x05, 0x4a, 0x41, 0x6c, 0x46, 0xfd, 0x7d, 0x95, 0xf9, 0x27, 0x73, 0xd5, 0xb6, 0x16, 0x2d, + 0xe9, 0xa4, 0xe6, 0x9f, 0x1c, 0x23, 0xb1, 0x49, 0xcb, 0xe7, 0x90, 0xe6, 0x57, 0x6c, 0xdc, 0x0a, + 0x44, 0x68, 0x42, 0x45, 0x81, 0x3e, 0x0e, 0xfd, 0x75, 0x77, 0xd7, 0x0d, 0x85, 0x72, 0xec, 0xd0, + 0xc6, 0xb8, 0x78, 0xeb, 0x9c, 0xe7, 0x6c, 0xb0, 0xe4, 0x67, 0xff, 0x50, 0x21, 0xee, 0x93, 0x37, + 0x5a, 0x7e, 0xe4, 0x1c, 0xc3, 0x49, 0x7e, 0xd5, 0x38, 0xc9, 0x9f, 0x68, 0x17, 0x9f, 0x91, 0x35, + 0x29, 0xf7, 0x04, 0xbf, 0x91, 0x38, 0xc1, 0x9f, 0xec, 0xcc, 0xaa, 0xfd, 0xc9, 0xfd, 0x0f, 0x2c, + 0x18, 0x37, 0xe8, 0x8f, 0xe1, 0x00, 0x59, 0x34, 0x0f, 0x90, 0x47, 0x3b, 0x7e, 0x43, 0xce, 0xc1, + 0xf1, 0xbd, 0xc5, 0x44, 0xdb, 0xd9, 0x81, 0xf1, 0x36, 0xf4, 0x6c, 0x39, 0x41, 0xbd, 0x5d, 0x3e, + 0x9a, 0x54, 0xa1, 0xe9, 0x6b, 0x4e, 0x20, 0x3c, 0x15, 0x9e, 0x95, 0xbd, 0x4e, 0x41, 0x1d, 0xbd, + 0x14, 0x58, 0x55, 0xe8, 0x65, 0xe8, 0x0b, 0x6b, 0x7e, 0x53, 0xbd, 0x99, 0xba, 0xc0, 0x3a, 0x9a, + 0x41, 0x0e, 0xf6, 0xcb, 0xc8, 0xac, 0x8e, 0x82, 0xb1, 0xa0, 0x47, 0x6f, 0xc2, 0x30, 0xfb, 0xa5, + 0xdc, 0x06, 0x8b, 0xf9, 0x1a, 0x8c, 0xaa, 0x4e, 0xc8, 0x7d, 0x6a, 0x0d, 0x10, 0x36, 0x59, 0x4d, + 0x6d, 0x42, 0x49, 0x7d, 0xd6, 0x43, 0xb5, 0x76, 0xff, 0xab, 0x22, 0x4c, 
0x64, 0xcc, 0x39, 0x14, + 0x1a, 0x23, 0x71, 0xa5, 0xcb, 0xa9, 0xfa, 0x0e, 0xc7, 0x22, 0x64, 0x17, 0xa8, 0xba, 0x98, 0x5b, + 0x5d, 0x57, 0x7a, 0x33, 0x24, 0xc9, 0x4a, 0x29, 0xa8, 0x73, 0xa5, 0xb4, 0xb2, 0x63, 0xeb, 0x6a, + 0x5a, 0x91, 0x6a, 0xe9, 0x43, 0x1d, 0xd3, 0x5f, 0xef, 0x81, 0x13, 0x59, 0x21, 0x63, 0xd1, 0x67, + 0x13, 0xd9, 0x90, 0x5f, 0xec, 0x36, 0xd8, 0x2c, 0x4f, 0x91, 0x2c, 0xc2, 0x40, 0x4e, 0x9b, 0xf9, + 0x91, 0x3b, 0x76, 0xb3, 0xa8, 0x93, 0x05, 0xa0, 0x09, 0x78, 0x16, 0x6b, 0xb9, 0x7d, 0x7c, 0xa0, + 0xeb, 0x06, 0x88, 0xf4, 0xd7, 0x61, 0xc2, 0x25, 0x49, 0x82, 0x3b, 0xbb, 0x24, 0xc9, 0x9a, 0xd1, + 0x12, 0xf4, 0xd5, 0xb8, 0xaf, 0x4b, 0xb1, 0xf3, 0x16, 0xc6, 0x1d, 0x5d, 0xd4, 0x06, 0x2c, 0x1c, + 0x5c, 0x04, 0x83, 0x29, 0x17, 0x06, 0xb5, 0x8e, 0x79, 0xa8, 0x93, 0x67, 0x9b, 0x1e, 0x7c, 0x5a, + 0x17, 0x3c, 0xd4, 0x09, 0xf4, 0xa3, 0x16, 0x24, 0x1e, 0xbc, 0x28, 0xa5, 0x9c, 0x95, 0xab, 0x94, + 0xbb, 0x00, 0x3d, 0x81, 0xdf, 0x20, 0xc9, 0x0c, 0xc4, 0xd8, 0x6f, 0x10, 0xcc, 0x30, 0x94, 0x22, + 0x8a, 0x55, 0x2d, 0x43, 0xfa, 0x35, 0x52, 0x5c, 0x10, 0x1f, 0x83, 0xde, 0x06, 0xd9, 0x25, 0x8d, + 0x64, 0xa2, 0xb8, 0x65, 0x0a, 0xc4, 0x1c, 0x67, 0xff, 0x62, 0x0f, 0x9c, 0x6b, 0x1b, 0x0d, 0x8a, + 0x5e, 0xc6, 0x36, 0x9d, 0x88, 0xdc, 0x71, 0xf6, 0x92, 0x19, 0x9d, 0xae, 0x72, 0x30, 0x96, 0x78, + 0xf6, 0xfc, 0x93, 0x27, 0x66, 0x48, 0xa8, 0x30, 0x45, 0x3e, 0x06, 0x81, 0x35, 0x55, 0x62, 0xc5, + 0xa3, 0x50, 0x89, 0x3d, 0x0f, 0x10, 0x86, 0x0d, 0xee, 0x16, 0x58, 0x17, 0xef, 0x4a, 0xe3, 0x04, + 0x1e, 0xd5, 0x65, 0x81, 0xc1, 0x1a, 0x15, 0x9a, 0x87, 0xb1, 0x66, 0xe0, 0x47, 0x5c, 0x23, 0x3c, + 0xcf, 0x3d, 0x67, 0x7b, 0xcd, 0x40, 0x3c, 0x95, 0x04, 0x1e, 0xa7, 0x4a, 0xa0, 0x97, 0x60, 0x50, + 0x04, 0xe7, 0xa9, 0xf8, 0x7e, 0x43, 0x28, 0xa1, 0x94, 0x33, 0x69, 0x35, 0x46, 0x61, 0x9d, 0x4e, + 0x2b, 0xc6, 0xd4, 0xcc, 0xfd, 0x99, 0xc5, 0xb8, 0xaa, 0x59, 0xa3, 0x4b, 0x44, 0xa2, 0x1e, 0xe8, + 0x2a, 0x12, 0x75, 0xac, 0x96, 0x2b, 0x75, 0x6d, 0xf5, 0x84, 0x8e, 0x8a, 0xac, 0xaf, 0xf4, 0xc0, + 0x84, 0x98, 0x38, 0x0f, 0x7b, 0xba, 0xdc, 0x4c, 0x4f, 0x97, 0xa3, 0x50, 0xdc, 0xbd, 0x37, 0x67, + 0x8e, 0x7b, 0xce, 0xfc, 0xb0, 0x05, 0xa6, 0xa4, 0x86, 0xfe, 0xb7, 0xdc, 0x94, 0x78, 0x2f, 0xe5, + 0x4a, 0x7e, 0x71, 0x94, 0xdf, 0x77, 0x96, 0x1c, 0xcf, 0xfe, 0xd7, 0x16, 0x3c, 0xda, 0x91, 0x23, + 0x5a, 0x80, 0x12, 0x13, 0x27, 0xb5, 0x8b, 0xde, 0x93, 0xca, 0xb3, 0x5e, 0x22, 0x72, 0xa4, 0xdb, + 0xb8, 0x24, 0x5a, 0x48, 0xe5, 0x1e, 0x7c, 0x2a, 0x23, 0xf7, 0xe0, 0x49, 0xa3, 0x7b, 0x1e, 0x30, + 0xf9, 0xe0, 0x0f, 0xd2, 0x13, 0xc7, 0x78, 0xd5, 0x86, 0x3e, 0x60, 0x28, 0x1d, 0xed, 0x84, 0xd2, + 0x11, 0x99, 0xd4, 0xda, 0x19, 0xf2, 0x51, 0x18, 0x63, 0x51, 0xfb, 0xd8, 0x3b, 0x0f, 0xf1, 0xde, + 0xae, 0x10, 0xfb, 0x72, 0x2f, 0x27, 0x70, 0x38, 0x45, 0x6d, 0xff, 0x69, 0x11, 0xfa, 0xf8, 0xf2, + 0x3b, 0x86, 0xeb, 0xe5, 0x33, 0x50, 0x72, 0x77, 0x76, 0x5a, 0x3c, 0x9d, 0x5c, 0x6f, 0xec, 0x19, + 0xbc, 0x24, 0x81, 0x38, 0xc6, 0xa3, 0x45, 0xa1, 0xef, 0x6e, 0x13, 0x18, 0x98, 0x37, 0x7c, 0x7a, + 0xde, 0x89, 0x1c, 0x2e, 0x2b, 0xa9, 0x73, 0x36, 0xd6, 0x8c, 0xa3, 0x4f, 0x01, 0x84, 0x51, 0xe0, + 0x7a, 0x9b, 0x14, 0x26, 0x62, 0xab, 0x3f, 0xdd, 0x86, 0x5b, 0x55, 0x11, 0x73, 0x9e, 0xf1, 0x9e, + 0xa3, 0x10, 0x58, 0xe3, 0x88, 0xa6, 0x8d, 0x93, 0x7e, 0x2a, 0x31, 0x76, 0xc0, 0xb9, 0xc6, 0x63, + 0x36, 0xf5, 0x41, 0x28, 0x29, 0xe6, 0x9d, 0xb4, 0x5f, 0x43, 0xba, 0x58, 0xf4, 0x11, 0x18, 0x4d, + 0xb4, 0xed, 0x50, 0xca, 0xb3, 0x5f, 0xb2, 0x60, 0x94, 0x37, 0x66, 0xc1, 0xdb, 0x15, 0xa7, 0xc1, + 0x3d, 0x38, 0xd1, 0xc8, 0xd8, 0x95, 0xc5, 0xf0, 0x77, 0xbf, 0x8b, 0x2b, 0x65, 0x59, 0x16, 0x16, + 
0x67, 0xd6, 0x81, 0x2e, 0xd1, 0x15, 0x47, 0x77, 0x5d, 0xa7, 0x21, 0xe2, 0x1b, 0x0c, 0xf1, 0xd5, + 0xc6, 0x61, 0x58, 0x61, 0xed, 0x3f, 0xb0, 0x60, 0x9c, 0xb7, 0xfc, 0x3a, 0xd9, 0x53, 0x7b, 0xd3, + 0x37, 0xb3, 0xed, 0x22, 0x91, 0x69, 0x21, 0x27, 0x91, 0xa9, 0xfe, 0x69, 0xc5, 0xb6, 0x9f, 0xf6, + 0x65, 0x0b, 0xc4, 0x0c, 0x39, 0x06, 0x7d, 0xc6, 0x77, 0x98, 0xfa, 0x8c, 0xa9, 0xfc, 0x45, 0x90, + 0xa3, 0xc8, 0xf8, 0x2b, 0x0b, 0xc6, 0x38, 0x41, 0x6c, 0xab, 0xff, 0xa6, 0x8e, 0xc3, 0xac, 0xf9, + 0x45, 0x99, 0xce, 0x97, 0xd7, 0xc9, 0xde, 0x9a, 0x5f, 0x71, 0xa2, 0xad, 0xec, 0x8f, 0x32, 0x06, + 0xab, 0xa7, 0xed, 0x60, 0xd5, 0xe5, 0x02, 0x32, 0xf2, 0x7c, 0x75, 0x08, 0x10, 0x70, 0xd8, 0x3c, + 0x5f, 0xf6, 0x9f, 0x59, 0x80, 0x78, 0x35, 0x86, 0xe0, 0x46, 0xc5, 0x21, 0x06, 0xd5, 0x0e, 0xba, + 0x78, 0x6b, 0x52, 0x18, 0xac, 0x51, 0x1d, 0x49, 0xf7, 0x24, 0x1c, 0x2e, 0x8a, 0x9d, 0x1d, 0x2e, + 0x0e, 0xd1, 0xa3, 0xff, 0xac, 0x0f, 0x92, 0x2f, 0xfb, 0xd0, 0x2d, 0x18, 0xaa, 0x39, 0x4d, 0x67, + 0xdd, 0x6d, 0xb8, 0x91, 0x4b, 0xc2, 0x76, 0xde, 0x58, 0x73, 0x1a, 0x9d, 0x30, 0x91, 0x6b, 0x10, + 0x6c, 0xf0, 0x41, 0xd3, 0x00, 0xcd, 0xc0, 0xdd, 0x75, 0x1b, 0x64, 0x93, 0xa9, 0x5d, 0x58, 0x44, + 0x15, 0xee, 0x1a, 0x26, 0xa1, 0x58, 0xa3, 0xc8, 0x08, 0xa3, 0x50, 0x7c, 0xc8, 0x61, 0x14, 0xe0, + 0xd8, 0xc2, 0x28, 0xf4, 0x1c, 0x2a, 0x8c, 0xc2, 0xc0, 0xa1, 0xc3, 0x28, 0xf4, 0x76, 0x15, 0x46, + 0x01, 0xc3, 0x29, 0x29, 0x7b, 0xd2, 0xff, 0x8b, 0x6e, 0x83, 0x88, 0x0b, 0x07, 0x0f, 0x03, 0x33, + 0x75, 0x7f, 0xbf, 0x7c, 0x0a, 0x67, 0x52, 0xe0, 0x9c, 0x92, 0xe8, 0x63, 0x30, 0xe9, 0x34, 0x1a, + 0xfe, 0x1d, 0x35, 0xa8, 0x0b, 0x61, 0xcd, 0x69, 0x70, 0x13, 0x48, 0x3f, 0xe3, 0x7a, 0xf6, 0xfe, + 0x7e, 0x79, 0x72, 0x26, 0x87, 0x06, 0xe7, 0x96, 0x46, 0x1f, 0x86, 0x52, 0x33, 0xf0, 0x6b, 0x2b, + 0xda, 0xf3, 0xe3, 0xf3, 0xb4, 0x03, 0x2b, 0x12, 0x78, 0xb0, 0x5f, 0x1e, 0x56, 0x7f, 0xd8, 0x81, + 0x1f, 0x17, 0xc8, 0x88, 0x8b, 0x30, 0x78, 0xa4, 0x71, 0x11, 0xb6, 0x61, 0xa2, 0x4a, 0x02, 0xd7, + 0x69, 0xb8, 0xf7, 0xa8, 0xbc, 0x2c, 0xf7, 0xa7, 0x35, 0x28, 0x05, 0x89, 0x1d, 0xb9, 0xab, 0x60, + 0xbd, 0x5a, 0xc2, 0x25, 0xb9, 0x03, 0xc7, 0x8c, 0xec, 0xff, 0x66, 0x41, 0xbf, 0x78, 0xc9, 0x77, + 0x0c, 0x52, 0xe3, 0x8c, 0x61, 0x94, 0x28, 0x67, 0x77, 0x18, 0x6b, 0x4c, 0xae, 0x39, 0x62, 0x29, + 0x61, 0x8e, 0x78, 0xb4, 0x1d, 0x93, 0xf6, 0x86, 0x88, 0xff, 0xaf, 0x48, 0xa5, 0x77, 0xe3, 0x4d, + 0xf9, 0xc3, 0xef, 0x82, 0x55, 0xe8, 0x0f, 0xc5, 0x9b, 0xe6, 0x42, 0xfe, 0x6b, 0x90, 0xe4, 0x20, + 0xc6, 0x5e, 0x74, 0xe2, 0x15, 0xb3, 0x64, 0x92, 0xf9, 0x58, 0xba, 0xf8, 0x10, 0x1f, 0x4b, 0x77, + 0x7a, 0x75, 0xdf, 0x73, 0x14, 0xaf, 0xee, 0xed, 0xaf, 0xb1, 0x93, 0x53, 0x87, 0x1f, 0x83, 0x50, + 0x75, 0xd5, 0x3c, 0x63, 0xed, 0x36, 0x33, 0x4b, 0x34, 0x2a, 0x47, 0xb8, 0xfa, 0x05, 0x0b, 0xce, + 0x65, 0x7c, 0x95, 0x26, 0x69, 0x3d, 0x0b, 0x03, 0x4e, 0xab, 0xee, 0xaa, 0xb5, 0xac, 0x99, 0x26, + 0x67, 0x04, 0x1c, 0x2b, 0x0a, 0x34, 0x07, 0xe3, 0xe4, 0x6e, 0xd3, 0xe5, 0x86, 0x5c, 0xdd, 0xf9, + 0xb8, 0xc8, 0x9f, 0x7f, 0x2e, 0x24, 0x91, 0x38, 0x4d, 0xaf, 0x02, 0x44, 0x15, 0x73, 0x03, 0x44, + 0xfd, 0xbc, 0x05, 0x83, 0xea, 0x55, 0xef, 0x43, 0xef, 0xed, 0x8f, 0x9a, 0xbd, 0xfd, 0x48, 0x9b, + 0xde, 0xce, 0xe9, 0xe6, 0xdf, 0x2b, 0xa8, 0xf6, 0x56, 0xfc, 0x20, 0xea, 0x42, 0x82, 0x7b, 0xf0, + 0x87, 0x13, 0x57, 0x60, 0xd0, 0x69, 0x36, 0x25, 0x42, 0x7a, 0xc0, 0xb1, 0xd0, 0xeb, 0x31, 0x18, + 0xeb, 0x34, 0xea, 0x1d, 0x47, 0x31, 0xf7, 0x1d, 0x47, 0x1d, 0x20, 0x72, 0x82, 0x4d, 0x12, 0x51, + 0x98, 0x70, 0xd8, 0xcd, 0xdf, 0x6f, 0x5a, 0x91, 0xdb, 0x98, 0x76, 0xbd, 0x28, 0x8c, 0x82, 0xe9, + 0x25, 0x2f, 0xba, 0x11, 
0xf0, 0x2b, 0xa4, 0x16, 0x62, 0x4d, 0xf1, 0xc2, 0x1a, 0x5f, 0x19, 0xc1, + 0x82, 0xd5, 0xd1, 0x6b, 0xba, 0x52, 0xac, 0x0a, 0x38, 0x56, 0x14, 0xf6, 0x07, 0xd9, 0xe9, 0xc3, + 0xfa, 0xf4, 0x70, 0xe1, 0xc5, 0x7e, 0x72, 0x48, 0x8d, 0x06, 0x33, 0x8a, 0xce, 0xeb, 0x41, 0xcc, + 0xda, 0x6f, 0xf6, 0xb4, 0x62, 0xfd, 0x45, 0x64, 0x1c, 0xe9, 0x0c, 0x7d, 0x22, 0xe5, 0x1e, 0xf3, + 0x5c, 0x87, 0x53, 0xe3, 0x10, 0x0e, 0x31, 0x2c, 0x0f, 0x13, 0xcb, 0x52, 0xb3, 0x54, 0x11, 0xeb, + 0x42, 0xcb, 0xc3, 0x24, 0x10, 0x38, 0xa6, 0xa1, 0xc2, 0x94, 0xfa, 0x13, 0x4e, 0xa2, 0x38, 0x16, + 0xb0, 0xa2, 0x0e, 0xb1, 0x46, 0x81, 0x2e, 0x0b, 0x85, 0x02, 0xb7, 0x0b, 0x3c, 0x92, 0x50, 0x28, + 0xc8, 0xee, 0xd2, 0xb4, 0x40, 0x57, 0x60, 0x90, 0xdc, 0x8d, 0x48, 0xe0, 0x39, 0x0d, 0x5a, 0x43, + 0x6f, 0x1c, 0x3f, 0x73, 0x21, 0x06, 0x63, 0x9d, 0x06, 0xad, 0xc1, 0x68, 0xc8, 0xf5, 0x6c, 0x2a, + 0x48, 0x3c, 0xd7, 0x57, 0x3e, 0xad, 0xde, 0x53, 0x9b, 0xe8, 0x03, 0x06, 0xe2, 0xbb, 0x93, 0x8c, + 0x32, 0x91, 0x64, 0x81, 0x5e, 0x83, 0x91, 0x86, 0xef, 0xd4, 0x67, 0x9d, 0x86, 0xe3, 0xd5, 0x58, + 0xff, 0x0c, 0x98, 0x89, 0xa8, 0x97, 0x0d, 0x2c, 0x4e, 0x50, 0x53, 0xe1, 0x4d, 0x87, 0x88, 0x30, + 0x6d, 0x8e, 0xb7, 0x49, 0x42, 0x91, 0x0f, 0x9e, 0x09, 0x6f, 0xcb, 0x39, 0x34, 0x38, 0xb7, 0x34, + 0x7a, 0x19, 0x86, 0xe4, 0xe7, 0x6b, 0x41, 0x59, 0xe2, 0x27, 0x31, 0x1a, 0x0e, 0x1b, 0x94, 0x28, + 0x84, 0x93, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x35, 0x11, 0xa9, 0x80, 0x3f, 0x1f, 0xfe, + 0x88, 0x7c, 0xab, 0xb8, 0x90, 0x45, 0x74, 0xb0, 0x5f, 0x3e, 0x2b, 0x7a, 0x2d, 0x13, 0x8f, 0xb3, + 0x79, 0xa3, 0x15, 0x98, 0xd8, 0x22, 0x4e, 0x23, 0xda, 0x9a, 0xdb, 0x22, 0xb5, 0x6d, 0xb9, 0xe0, + 0x58, 0x98, 0x17, 0xed, 0xe9, 0xc8, 0xb5, 0x34, 0x09, 0xce, 0x2a, 0x87, 0xde, 0x82, 0xc9, 0x66, + 0x6b, 0xbd, 0xe1, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x9c, 0x90, 0x66, 0xea, 0xf5, 0x80, 0x84, 0xfc, + 0x75, 0x29, 0x3b, 0x7a, 0x65, 0x20, 0x9d, 0x4a, 0x0e, 0x1d, 0xce, 0xe5, 0x80, 0xee, 0xc1, 0xc9, + 0xc4, 0x44, 0x10, 0x11, 0x31, 0x46, 0xf2, 0x53, 0xc4, 0x54, 0xb3, 0x0a, 0x88, 0xe0, 0x32, 0x59, + 0x28, 0x9c, 0x5d, 0x05, 0x7a, 0x05, 0xc0, 0x6d, 0x2e, 0x3a, 0x3b, 0x6e, 0x83, 0x5e, 0x15, 0x27, + 0xd8, 0x1c, 0xa1, 0xd7, 0x06, 0x58, 0xaa, 0x48, 0x28, 0xdd, 0x9b, 0xc5, 0xbf, 0x3d, 0xac, 0x51, + 0xa3, 0x65, 0x18, 0x11, 0xff, 0xf6, 0xc4, 0x90, 0xf2, 0xc0, 0x2c, 0x8f, 0xb3, 0xa8, 0x5a, 0x15, + 0x1d, 0x73, 0x90, 0x82, 0xe0, 0x44, 0x59, 0xb4, 0x09, 0xe7, 0x64, 0xa2, 0x3f, 0x7d, 0x7e, 0xca, + 0x31, 0x08, 0x59, 0x5e, 0x96, 0x01, 0xfe, 0x2a, 0x65, 0xa6, 0x1d, 0x21, 0x6e, 0xcf, 0x87, 0x9e, + 0xeb, 0xfa, 0x34, 0xe7, 0x6f, 0x8e, 0x4f, 0xc6, 0x11, 0x07, 0x97, 0x93, 0x48, 0x9c, 0xa6, 0x47, + 0x3e, 0x9c, 0x74, 0xbd, 0xac, 0x59, 0x7d, 0x8a, 0x31, 0xfa, 0x10, 0x7f, 0x6e, 0xdd, 0x7e, 0x46, + 0x67, 0xe2, 0x71, 0x36, 0xdf, 0x77, 0xe6, 0xf7, 0xf7, 0xfb, 0x16, 0x2d, 0xad, 0x49, 0xe7, 0xe8, + 0xd3, 0x30, 0xa4, 0x7f, 0x94, 0x90, 0x34, 0x2e, 0x66, 0x0b, 0xaf, 0xda, 0x9e, 0xc0, 0x65, 0x7b, + 0xb5, 0xee, 0x75, 0x1c, 0x36, 0x38, 0xa2, 0x5a, 0x46, 0x6c, 0x83, 0xcb, 0xdd, 0x49, 0x32, 0xdd, + 0xbb, 0xbd, 0x11, 0xc8, 0x9e, 0xee, 0x68, 0x19, 0x06, 0x6a, 0x0d, 0x97, 0x78, 0xd1, 0x52, 0xa5, + 0x5d, 0xf4, 0xc6, 0x39, 0x41, 0x23, 0xd6, 0x8f, 0x48, 0xb1, 0xc2, 0x61, 0x58, 0x71, 0xb0, 0x7f, + 0xb3, 0x00, 0xe5, 0x0e, 0xf9, 0x7a, 0x12, 0x66, 0x28, 0xab, 0x2b, 0x33, 0xd4, 0x0c, 0x8c, 0xc6, + 0xff, 0x74, 0x0d, 0x97, 0xf2, 0x64, 0xbd, 0x65, 0xa2, 0x71, 0x92, 0xbe, 0xeb, 0x47, 0x09, 0xba, + 0x25, 0xab, 0xa7, 0xe3, 0xb3, 0x1a, 0xc3, 0x82, 0xdd, 0xdb, 0xfd, 0xb5, 0x37, 0xd7, 0x1a, 0x69, + 0x7f, 0xad, 0x00, 0x27, 0x55, 0x17, 0x7e, 0xfb, 
0x76, 0xdc, 0xcd, 0x74, 0xc7, 0x1d, 0x81, 0x2d, + 0xd7, 0xbe, 0x01, 0x7d, 0x3c, 0x1c, 0x65, 0x17, 0xe2, 0xf6, 0x63, 0x66, 0x94, 0x6c, 0x25, 0xe1, + 0x19, 0x91, 0xb2, 0xbf, 0xdf, 0x82, 0xd1, 0xc4, 0xeb, 0x36, 0x84, 0xb5, 0x27, 0xd0, 0x0f, 0x22, + 0x12, 0x67, 0x09, 0xdb, 0x17, 0xa0, 0x67, 0xcb, 0x0f, 0xa3, 0xa4, 0xa3, 0xc7, 0x35, 0x3f, 0x8c, + 0x30, 0xc3, 0xd8, 0x7f, 0x68, 0x41, 0xef, 0x9a, 0xe3, 0x7a, 0x91, 0x34, 0x0a, 0x58, 0x39, 0x46, + 0x81, 0x6e, 0xbe, 0x0b, 0xbd, 0x04, 0x7d, 0x64, 0x63, 0x83, 0xd4, 0x22, 0x31, 0xaa, 0x32, 0x14, + 0x42, 0xdf, 0x02, 0x83, 0x52, 0xf9, 0x8f, 0x55, 0xc6, 0xff, 0x62, 0x41, 0x8c, 0x6e, 0x43, 0x29, + 0x72, 0x77, 0xc8, 0x4c, 0xbd, 0x2e, 0x4c, 0xe5, 0x0f, 0x10, 0xbf, 0x63, 0x4d, 0x32, 0xc0, 0x31, + 0x2f, 0xfb, 0x8b, 0x05, 0x80, 0x38, 0x8e, 0x57, 0xa7, 0x4f, 0x9c, 0x4d, 0x19, 0x51, 0x2f, 0x66, + 0x18, 0x51, 0x51, 0xcc, 0x30, 0xc3, 0x82, 0xaa, 0xba, 0xa9, 0xd8, 0x55, 0x37, 0xf5, 0x1c, 0xa6, + 0x9b, 0xe6, 0x60, 0x3c, 0x8e, 0x43, 0x66, 0x86, 0x61, 0x64, 0x47, 0xe7, 0x5a, 0x12, 0x89, 0xd3, + 0xf4, 0x36, 0x81, 0x0b, 0x2a, 0x1c, 0x93, 0x38, 0xd1, 0x98, 0x1f, 0xb8, 0x6e, 0x94, 0xee, 0xd0, + 0x4f, 0xb1, 0x95, 0xb8, 0x90, 0x6b, 0x25, 0xfe, 0x09, 0x0b, 0x4e, 0x24, 0xeb, 0x61, 0x8f, 0xa6, + 0xbf, 0x60, 0xc1, 0x49, 0x66, 0x2b, 0x67, 0xb5, 0xa6, 0x2d, 0xf3, 0x2f, 0xb6, 0x0d, 0x31, 0x95, + 0xd3, 0xe2, 0x38, 0xe6, 0xc6, 0x4a, 0x16, 0x6b, 0x9c, 0x5d, 0xa3, 0xfd, 0x5f, 0x7b, 0x60, 0x32, + 0x2f, 0x36, 0x15, 0x7b, 0x26, 0xe2, 0xdc, 0xad, 0x6e, 0x93, 0x3b, 0xc2, 0x19, 0x3f, 0x7e, 0x26, + 0xc2, 0xc1, 0x58, 0xe2, 0x93, 0xe9, 0x4f, 0x0a, 0x5d, 0xa6, 0x3f, 0xd9, 0x82, 0xf1, 0x3b, 0x5b, + 0xc4, 0xbb, 0xe9, 0x85, 0x4e, 0xe4, 0x86, 0x1b, 0x2e, 0xb3, 0x2b, 0xf3, 0x79, 0x23, 0x73, 0x50, + 0x8f, 0xdf, 0x4e, 0x12, 0x1c, 0xec, 0x97, 0xcf, 0x19, 0x80, 0xb8, 0xc9, 0x7c, 0x23, 0xc1, 0x69, + 0xa6, 0xe9, 0xec, 0x31, 0x3d, 0x0f, 0x39, 0x7b, 0xcc, 0x8e, 0x2b, 0xbc, 0x51, 0xe4, 0x1b, 0x00, + 0x76, 0x63, 0x5c, 0x51, 0x50, 0xac, 0x51, 0xa0, 0x4f, 0x02, 0xd2, 0x33, 0x74, 0x19, 0xa1, 0x41, + 0x9f, 0xbb, 0xbf, 0x5f, 0x46, 0xab, 0x29, 0xec, 0xc1, 0x7e, 0x79, 0x82, 0x42, 0x97, 0x3c, 0x7a, + 0xf3, 0x8c, 0xe3, 0xa9, 0x65, 0x30, 0x42, 0xb7, 0x61, 0x8c, 0x42, 0xd9, 0x8a, 0x92, 0x71, 0x47, + 0xf9, 0x6d, 0xf1, 0x99, 0xfb, 0xfb, 0xe5, 0xb1, 0xd5, 0x04, 0x2e, 0x8f, 0x75, 0x8a, 0x09, 0x7a, + 0x05, 0x46, 0xe2, 0x79, 0x75, 0x9d, 0xec, 0xf1, 0x00, 0x3d, 0x25, 0xae, 0xf0, 0x5e, 0x31, 0x30, + 0x38, 0x41, 0x69, 0x7f, 0xc1, 0x82, 0x33, 0xb9, 0x19, 0xf1, 0xd1, 0x25, 0x18, 0x70, 0x9a, 0x2e, + 0x37, 0x5f, 0x88, 0xa3, 0x86, 0xa9, 0xc9, 0x2a, 0x4b, 0xdc, 0x78, 0xa1, 0xb0, 0x74, 0x87, 0xdf, + 0x76, 0xbd, 0x7a, 0x72, 0x87, 0xbf, 0xee, 0x7a, 0x75, 0xcc, 0x30, 0xea, 0xc8, 0x2a, 0xe6, 0x3e, + 0x45, 0xf8, 0x0a, 0x5d, 0xab, 0x19, 0xb9, 0xf3, 0x8f, 0xb7, 0x19, 0xe8, 0x19, 0xdd, 0xd4, 0x28, + 0xbc, 0x0a, 0x73, 0xcd, 0x8c, 0xdf, 0x67, 0x81, 0x78, 0xba, 0xdc, 0xc5, 0x99, 0xfc, 0x26, 0x0c, + 0xed, 0xa6, 0xb3, 0x17, 0x5e, 0xc8, 0x7f, 0xcb, 0x2d, 0x22, 0xae, 0x2b, 0x41, 0xdb, 0xc8, 0x54, + 0x68, 0xf0, 0xb2, 0xeb, 0x20, 0xb0, 0xf3, 0x84, 0x19, 0x14, 0x3a, 0xb7, 0xe6, 0x79, 0x80, 0x3a, + 0xa3, 0x65, 0x29, 0x8d, 0x0b, 0xa6, 0xc4, 0x35, 0xaf, 0x30, 0x58, 0xa3, 0xb2, 0xff, 0x79, 0x01, + 0x06, 0x65, 0xb6, 0xbc, 0x96, 0xd7, 0x8d, 0xda, 0xef, 0x50, 0xe9, 0xb3, 0xd1, 0x65, 0x28, 0x31, + 0xbd, 0x74, 0x25, 0xd6, 0x96, 0x2a, 0xad, 0xd0, 0x8a, 0x44, 0xe0, 0x98, 0x86, 0xee, 0x8e, 0x61, + 0x6b, 0x9d, 0x91, 0x27, 0x1e, 0xda, 0x56, 0x39, 0x18, 0x4b, 0x3c, 0xfa, 0x18, 0x8c, 0xf1, 0x72, + 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x2d, 0xab, 0x57, 0x45, 0x2f, 0x19, 0x5b, 
0x49, 0xe0, 0x0e, 0xf6, + 0xcb, 0x27, 0x92, 0x30, 0x66, 0xa4, 0x4d, 0x71, 0x61, 0x2e, 0x6b, 0xbc, 0x12, 0xba, 0xab, 0xa7, + 0x3c, 0xdd, 0x62, 0x14, 0xd6, 0xe9, 0xec, 0x4f, 0x03, 0x4a, 0xe7, 0x0d, 0x44, 0xaf, 0x73, 0x97, + 0x67, 0x37, 0x20, 0xf5, 0x76, 0x46, 0x5b, 0x3d, 0x46, 0x87, 0x7c, 0x23, 0xc7, 0x4b, 0x61, 0x55, + 0xde, 0xfe, 0x3f, 0x8b, 0x30, 0x96, 0x8c, 0x0a, 0x80, 0xae, 0x41, 0x1f, 0x17, 0x29, 0x05, 0xfb, + 0x36, 0x3e, 0x41, 0x5a, 0x2c, 0x01, 0x76, 0xb8, 0x0a, 0xa9, 0x54, 0x94, 0x47, 0x6f, 0xc1, 0x60, + 0xdd, 0xbf, 0xe3, 0xdd, 0x71, 0x82, 0xfa, 0x4c, 0x65, 0x49, 0x4c, 0xe7, 0x4c, 0x45, 0xc5, 0x7c, + 0x4c, 0xa6, 0xc7, 0x27, 0x60, 0xf6, 0xef, 0x18, 0x85, 0x75, 0x76, 0x68, 0x8d, 0x25, 0xfa, 0xd8, + 0x70, 0x37, 0x57, 0x9c, 0x66, 0xbb, 0xf7, 0x2f, 0x73, 0x92, 0x48, 0xe3, 0x3c, 0x2c, 0xb2, 0x81, + 0x70, 0x04, 0x8e, 0x19, 0xa1, 0xcf, 0xc2, 0x44, 0x98, 0x63, 0x3a, 0xc9, 0x4b, 0x23, 0xdb, 0xce, + 0x9a, 0x30, 0x7b, 0xfa, 0xfe, 0x7e, 0x79, 0x22, 0xcb, 0xc8, 0x92, 0x55, 0x8d, 0xfd, 0xa5, 0x13, + 0x60, 0x2c, 0x62, 0x23, 0xab, 0xb8, 0x75, 0x44, 0x59, 0xc5, 0x31, 0x0c, 0x90, 0x9d, 0x66, 0xb4, + 0x37, 0xef, 0x06, 0x62, 0x4c, 0x32, 0x79, 0x2e, 0x08, 0x9a, 0x34, 0x4f, 0x89, 0xc1, 0x8a, 0x4f, + 0x76, 0xea, 0xf7, 0xe2, 0x37, 0x31, 0xf5, 0x7b, 0xcf, 0x31, 0xa6, 0x7e, 0x5f, 0x85, 0xfe, 0x4d, + 0x37, 0xc2, 0xa4, 0xe9, 0x8b, 0xcb, 0x5c, 0xe6, 0x3c, 0xbc, 0xca, 0x49, 0xd2, 0x49, 0x86, 0x05, + 0x02, 0x4b, 0x26, 0xe8, 0x75, 0xb5, 0x02, 0xfb, 0xf2, 0x15, 0x2e, 0x69, 0xe7, 0x95, 0xcc, 0x35, + 0x28, 0x12, 0xbc, 0xf7, 0x3f, 0x68, 0x82, 0xf7, 0x45, 0x99, 0x96, 0x7d, 0x20, 0xff, 0xb1, 0x1a, + 0xcb, 0xba, 0xde, 0x21, 0x19, 0xfb, 0x2d, 0x3d, 0x95, 0x7d, 0x29, 0x7f, 0x27, 0x50, 0x59, 0xea, + 0xbb, 0x4c, 0x60, 0xff, 0x7d, 0x16, 0x9c, 0x4c, 0xa6, 0x9a, 0x65, 0x6f, 0x2a, 0x84, 0x9f, 0xc7, + 0x4b, 0xdd, 0xe4, 0xfe, 0x65, 0x05, 0x8c, 0x0a, 0x99, 0x8e, 0x34, 0x93, 0x0c, 0x67, 0x57, 0x47, + 0x3b, 0x3a, 0x58, 0xaf, 0x0b, 0x7f, 0x83, 0xc7, 0x72, 0x32, 0xe1, 0xb7, 0xc9, 0x7f, 0xbf, 0x96, + 0x91, 0x75, 0xfd, 0xf1, 0xbc, 0xac, 0xeb, 0x5d, 0xe7, 0x5a, 0x7f, 0x5d, 0xe5, 0xc0, 0x1f, 0xce, + 0x9f, 0x4a, 0x3c, 0xc3, 0x7d, 0xc7, 0xcc, 0xf7, 0xaf, 0xab, 0xcc, 0xf7, 0x6d, 0x22, 0x8b, 0xf3, + 0xbc, 0xf6, 0x1d, 0xf3, 0xdd, 0x6b, 0x39, 0xeb, 0x47, 0x8f, 0x26, 0x67, 0xbd, 0x71, 0xd4, 0xf0, + 0xb4, 0xe9, 0xcf, 0x74, 0x38, 0x6a, 0x0c, 0xbe, 0xed, 0x0f, 0x1b, 0x9e, 0x9f, 0x7f, 0xfc, 0x81, + 0xf2, 0xf3, 0xdf, 0xd2, 0xf3, 0xdd, 0xa3, 0x0e, 0x09, 0xdd, 0x29, 0x51, 0x97, 0x59, 0xee, 0x6f, + 0xe9, 0x07, 0xe0, 0x44, 0x3e, 0x5f, 0x75, 0xce, 0xa5, 0xf9, 0x66, 0x1e, 0x81, 0xa9, 0xec, 0xf9, + 0x27, 0x8e, 0x27, 0x7b, 0xfe, 0xc9, 0x23, 0xcf, 0x9e, 0x7f, 0xea, 0x18, 0xb2, 0xe7, 0x9f, 0x3e, + 0xc6, 0xec, 0xf9, 0xb7, 0x98, 0x73, 0x14, 0x0f, 0x00, 0x25, 0x22, 0xa1, 0x3f, 0x95, 0x13, 0x3f, + 0x2d, 0x1d, 0x25, 0x8a, 0x7f, 0x9c, 0x42, 0xe1, 0x98, 0x55, 0x46, 0x56, 0xfe, 0xc9, 0x87, 0x90, + 0x95, 0x7f, 0x35, 0xce, 0xca, 0x7f, 0x26, 0x7f, 0xa8, 0x33, 0x9e, 0xd3, 0xe4, 0xe4, 0xe2, 0xbf, + 0xa5, 0xe7, 0xd0, 0x7f, 0xa4, 0x8d, 0x15, 0x2c, 0x4b, 0xa1, 0xdc, 0x26, 0x73, 0xfe, 0x6b, 0x3c, + 0x73, 0xfe, 0xd9, 0xfc, 0x9d, 0x3c, 0x79, 0xdc, 0x19, 0xf9, 0xf2, 0x69, 0xbb, 0x54, 0xf0, 0x57, + 0x16, 0xf3, 0x3d, 0xa7, 0x5d, 0x2a, 0x7a, 0x6c, 0xba, 0x5d, 0x0a, 0x85, 0x63, 0x56, 0xf6, 0x0f, + 0x14, 0xe0, 0x7c, 0xfb, 0xf5, 0x16, 0x6b, 0xc9, 0x2b, 0xb1, 0x43, 0x40, 0x42, 0x4b, 0xce, 0xef, + 0x6c, 0x31, 0x55, 0xd7, 0xf1, 0x20, 0xaf, 0xc2, 0xb8, 0x7a, 0x87, 0xd3, 0x70, 0x6b, 0x7b, 0xab, + 0xf1, 0x35, 0x59, 0x45, 0x4e, 0xa8, 0x26, 0x09, 0x70, 0xba, 0x0c, 0x9a, 0x81, 0x51, 0x03, 0xb8, + 
0x34, 0x2f, 0xee, 0x66, 0x71, 0x94, 0x71, 0x13, 0x8d, 0x93, 0xf4, 0xf6, 0xcf, 0x59, 0x70, 0x3a, + 0x27, 0xe5, 0x6b, 0xd7, 0xe1, 0x0e, 0x37, 0x60, 0xb4, 0x69, 0x16, 0xed, 0x10, 0xa1, 0xd5, 0x48, + 0x2c, 0xab, 0xda, 0x9a, 0x40, 0xe0, 0x24, 0x53, 0xfb, 0x67, 0x0a, 0x70, 0xae, 0xad, 0x63, 0x29, + 0xc2, 0x70, 0x6a, 0x73, 0x27, 0x74, 0xe6, 0x02, 0x52, 0x27, 0x5e, 0xe4, 0x3a, 0x8d, 0x6a, 0x93, + 0xd4, 0x34, 0x3b, 0x07, 0xf3, 0xd0, 0xbc, 0xba, 0x52, 0x9d, 0x49, 0x53, 0xe0, 0x9c, 0x92, 0x68, + 0x11, 0x50, 0x1a, 0x23, 0x46, 0x98, 0x65, 0x0f, 0x48, 0xf3, 0xc3, 0x19, 0x25, 0xd0, 0x07, 0x61, + 0x58, 0x39, 0xac, 0x6a, 0x23, 0xce, 0x36, 0x76, 0xac, 0x23, 0xb0, 0x49, 0x87, 0xae, 0xf0, 0xf4, + 0x13, 0x22, 0x51, 0x89, 0x30, 0x8a, 0x8c, 0xca, 0xdc, 0x12, 0x02, 0x8c, 0x75, 0x9a, 0xd9, 0x97, + 0x7f, 0xeb, 0x1b, 0xe7, 0xdf, 0xf7, 0xbb, 0xdf, 0x38, 0xff, 0xbe, 0x3f, 0xf8, 0xc6, 0xf9, 0xf7, + 0x7d, 0xd7, 0xfd, 0xf3, 0xd6, 0x6f, 0xdd, 0x3f, 0x6f, 0xfd, 0xee, 0xfd, 0xf3, 0xd6, 0x1f, 0xdc, + 0x3f, 0x6f, 0xfd, 0xf1, 0xfd, 0xf3, 0xd6, 0x17, 0xff, 0xe4, 0xfc, 0xfb, 0xde, 0x44, 0x71, 0x00, + 0xd1, 0xcb, 0x74, 0x74, 0x2e, 0xef, 0x5e, 0xf9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x0b, + 0x0a, 0x3d, 0x91, 0x13, 0x01, 0x00, } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { @@ -8423,70 +8265,6 @@ func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ClusterTrustBundleProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterTrustBundleProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterTrustBundleProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x22 - if m.LabelSelector != nil { - { - size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.SignerName != nil { - i -= len(*m.SignerName) - copy(dAtA[i:], *m.SignerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) - i-- - dAtA[i] = 0x12 - } - if m.Name != nil { - i -= len(*m.Name) - copy(dAtA[i:], *m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -11927,18 +11705,6 @@ func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Sleep != nil { - { - size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } if m.TCPSocket != nil { { size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) @@ -12359,13 +12125,6 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } - if m.IPMode != nil { - i -= len(*m.IPMode) - copy(dAtA[i:], *m.IPMode) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(*m.IPMode))) - i-- - dAtA[i] = 0x1a - } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -12479,39 +12238,6 @@ func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ModifyVolumeStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ModifyVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ModifyVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.TargetVolumeAttributesClassName) - copy(dAtA[i:], m.TargetVolumeAttributesClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetVolumeAttributesClassName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14031,13 +13757,6 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l - if m.VolumeAttributesClassName != nil { - i -= len(*m.VolumeAttributesClassName) - copy(dAtA[i:], *m.VolumeAttributesClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) - i-- - dAtA[i] = 0x4a - } if m.DataSourceRef != nil { { size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i]) @@ -14135,25 +13854,6 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er _ = i var l int _ = l - if m.ModifyVolumeStatus != nil { - { - size, err := m.ModifyVolumeStatus.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.CurrentVolumeAttributesClassName != nil { - i -= len(*m.CurrentVolumeAttributesClassName) - copy(dAtA[i:], *m.CurrentVolumeAttributesClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentVolumeAttributesClassName))) - i-- - dAtA[i] = 0x42 - } if len(m.AllocatedResourceStatuses) > 0 { keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses)) for k := range m.AllocatedResourceStatuses { @@ -14714,13 +14414,6 @@ func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.VolumeAttributesClassName != nil { - i -= len(*m.VolumeAttributesClassName) - copy(dAtA[i:], *m.VolumeAttributesClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName))) - i-- - dAtA[i] = 0x52 - } if m.NodeAffinity != nil { { size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) @@ -15029,24 +14722,6 @@ func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.MismatchLabelKeys) > 0 { - for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.MismatchLabelKeys[iNdEx]) - copy(dAtA[i:], m.MismatchLabelKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.MatchLabelKeys) > 0 { - for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.MatchLabelKeys[iNdEx]) - copy(dAtA[i:], 
m.MatchLabelKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } if m.NamespaceSelector != nil { { size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -19569,32 +19244,6 @@ func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SleepAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -20301,18 +19950,6 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.ClusterTrustBundle != nil { - { - size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } if m.ServiceAccountToken != nil { { size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) @@ -20364,87 +20001,6 @@ func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *VolumeResourceRequirements) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Requests) > 0 { - keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { - v := m.Requests[ResourceName(keysForRequests[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForRequests[iNdEx]) - copy(dAtA[i:], keysForRequests[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { - v := m.Limits[ResourceName(keysForLimits[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForLimits[iNdEx]) - 
copy(dAtA[i:], keysForLimits[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *VolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -21337,32 +20893,6 @@ func (m *ClientIPConfig) Size() (n int) { return n } -func (m *ClusterTrustBundleProjection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Name != nil { - l = len(*m.Name) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SignerName != nil { - l = len(*m.SignerName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n -} - func (m *ComponentCondition) Size() (n int) { if m == nil { return 0 @@ -22638,10 +22168,6 @@ func (m *LifecycleHandler) Size() (n int) { l = m.TCPSocket.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Sleep != nil { - l = m.Sleep.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -22773,10 +22299,6 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) - if m.IPMode != nil { - l = len(*m.IPMode) - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -22827,19 +22349,6 @@ func (m *LocalVolumeSource) Size() (n int) { return n } -func (m *ModifyVolumeStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TargetVolumeAttributesClassName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *NFSVolumeSource) Size() (n int) { if m == nil { return 0 @@ -23421,10 +22930,6 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = m.DataSourceRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.VolumeAttributesClassName != nil { - l = len(*m.VolumeAttributesClassName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -23474,14 +22979,6 @@ func (m *PersistentVolumeClaimStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.CurrentVolumeAttributesClassName != nil { - l = len(*m.CurrentVolumeAttributesClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ModifyVolumeStatus != nil { - l = m.ModifyVolumeStatus.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -23669,10 +23166,6 @@ func (m *PersistentVolumeSpec) Size() (n int) { l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.VolumeAttributesClassName != nil { - l = len(*m.VolumeAttributesClassName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } @@ -23766,18 +23259,6 @@ func (m *PodAffinityTerm) Size() (n int) { l = m.NamespaceSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.MatchLabelKeys) > 0 { - for _, s := range m.MatchLabelKeys { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.MismatchLabelKeys) > 0 { - for _, s := range m.MismatchLabelKeys { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } @@ -25419,16 +24900,6 @@ func (m *SessionAffinityConfig) Size() (n int) { return n } -func (m *SleepAction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Seconds)) - return n 
-} - func (m *StorageOSPersistentVolumeSource) Size() (n int) { if m == nil { return 0 @@ -25722,37 +25193,6 @@ func (m *VolumeProjection) Size() (n int) { l = m.ServiceAccountToken.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ClusterTrustBundle != nil { - l = m.ClusterTrustBundle.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeResourceRequirements) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } return n } @@ -26183,20 +25623,6 @@ func (this *ClientIPConfig) String() string { }, "") return s } -func (this *ClusterTrustBundleProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterTrustBundleProjection{`, - `Name:` + valueToStringGenerated(this.Name) + `,`, - `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} func (this *ComponentCondition) String() string { if this == nil { return "nil" @@ -27151,7 +26577,6 @@ func (this *LifecycleHandler) String() string { `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`, `}`, }, "") return s @@ -27291,7 +26716,6 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -27333,17 +26757,6 @@ func (this *LocalVolumeSource) String() string { }, "") return s } -func (this *ModifyVolumeStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ModifyVolumeStatus{`, - `TargetVolumeAttributesClassName:` + fmt.Sprintf("%v", this.TargetVolumeAttributesClassName) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `}`, - }, "") - return s -} func (this *NFSVolumeSource) String() string { if this == nil { return "nil" @@ -27793,14 +27206,13 @@ func (this *PersistentVolumeClaimSpec) String() string { } s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "VolumeResourceRequirements", "VolumeResourceRequirements", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, 
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`, - `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -27851,8 +27263,6 @@ func (this *PersistentVolumeClaimStatus) String() string { `Conditions:` + repeatedStringForConditions + `,`, `AllocatedResources:` + mapStringForAllocatedResources + `,`, `AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`, - `CurrentVolumeAttributesClassName:` + valueToStringGenerated(this.CurrentVolumeAttributesClassName) + `,`, - `ModifyVolumeStatus:` + strings.Replace(this.ModifyVolumeStatus.String(), "ModifyVolumeStatus", "ModifyVolumeStatus", 1) + `,`, `}`, }, "") return s @@ -27950,7 +27360,6 @@ func (this *PersistentVolumeSpec) String() string { `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, - `VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`, `}`, }, "") return s @@ -28021,8 +27430,6 @@ func (this *PodAffinityTerm) String() string { `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`, - `MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`, `}`, }, "") return s @@ -29302,16 +28709,6 @@ func (this *SessionAffinityConfig) String() string { }, "") return s } -func (this *SleepAction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SleepAction{`, - `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, - `}`, - }, "") - return s -} func (this *StorageOSPersistentVolumeSource) String() string { if this == nil { return "nil" @@ -29513,38 +28910,6 @@ func (this *VolumeProjection) String() string { `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeResourceRequirements) String() string { - if this == nil { - return "nil" - } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) - } 
- mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) - } - mapStringForRequests += "}" - s := strings.Join([]string{`&VolumeResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, `}`, }, "") return s @@ -32672,7 +32037,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32695,15 +32060,15 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32731,12 +32096,11 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32764,14 +32128,13 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.SignerName = &s + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -32781,31 +32144,27 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.LabelSelector == nil { - m.LabelSelector = &v1.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32833,29 +32192,8 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32877,7 +32215,7 @@ func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32900,113 +32238,17 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33016,110 +32258,28 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -43888,42 +43048,6 @@ func (m *LifecycleHandler) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Sleep == nil { - m.Sleep = &SleepAction{} - } - if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45199,39 +44323,6 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) - m.IPMode = &s - iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) @@ -45568,7 +44659,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { +func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -45591,15 +44682,15 @@ func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ModifyVolumeStatus: wiretype end group for non-group") + return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ModifyVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetVolumeAttributesClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -45627,11 +44718,11 @@ func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetVolumeAttributesClassName = string(dAtA[iNdEx:postIndex]) + m.Server = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -45659,8 +44750,28 @@ func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = PersistentVolumeClaimModifyVolumeStatus(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45682,7 +44793,7 @@ func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *Namespace) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -45705,17 +44816,17 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45725,278 +44836,144 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Server = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Namespace) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -50798,39 +49775,6 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - 
m.VolumeAttributesClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51364,75 +50308,6 @@ func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { } m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue)) iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentVolumeAttributesClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.CurrentVolumeAttributesClassName = &s - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ModifyVolumeStatus", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ModifyVolumeStatus == nil { - m.ModifyVolumeStatus = &ModifyVolumeStatus{} - } - if err := m.ModifyVolumeStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -53055,39 +51930,6 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.VolumeAttributesClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -53837,70 +52679,6 @@ func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -68096,75 +66874,6 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *SleepAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SleepAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SleepAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - m.Seconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Seconds |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -70709,350 +69418,6 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterTrustBundle", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClusterTrustBundle == nil { - m.ClusterTrustBundle = &ClusterTrustBundleProjection{} - } - if err := m.ClusterTrustBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != 
nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeResourceRequirements) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeResourceRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Limits == nil { - m.Limits = make(ResourceList) - } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated 
- } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Limits[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Requests == nil { - m.Requests = make(ResourceList) - } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Requests[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d099238cdf6..901e837313f 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -228,8 +228,10 @@ message CSIPersistentVolumeSource { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. 
// This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret // +optional optional SecretReference nodeExpandSecretRef = 10; } @@ -431,40 +433,6 @@ message ClientIPConfig { optional int32 timeoutSeconds = 1; } -// ClusterTrustBundleProjection describes how to select a set of -// ClusterTrustBundle objects and project their contents into the pod -// filesystem. -message ClusterTrustBundleProjection { - // Select a single ClusterTrustBundle by object name. Mutually-exclusive - // with signerName and labelSelector. - // +optional - optional string name = 1; - - // Select all ClusterTrustBundles that match this signer name. - // Mutually-exclusive with name. The contents of all selected - // ClusterTrustBundles will be unified and deduplicated. - // +optional - optional string signerName = 2; - - // Select all ClusterTrustBundles that match this label selector. Only has - // effect if signerName is set. Mutually-exclusive with name. If unset, - // interpreted as "match nothing". If set but empty, interpreted as "match - // everything". - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3; - - // If true, don't block pod startup if the referenced ClusterTrustBundle(s) - // aren't available. If using name, then the named ClusterTrustBundle is - // allowed not to exist. If using signerName, then the combination of - // signerName and labelSelector is allowed to match zero - // ClusterTrustBundles. - // +optional - optional bool optional = 5; - - // Relative path from the volume root to write the bundle. - optional string path = 4; -} - // Information about the condition of a component. message ComponentCondition { // Type of condition for a component. @@ -1191,7 +1159,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -2120,11 +2088,6 @@ message LifecycleHandler { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional optional TCPSocketAction tcpSocket = 3; - - // Sleep represents the duration that the container should sleep before being terminated. - // +featureGate=PodLifecycleSleepAction - // +optional - optional SleepAction sleep = 4; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. @@ -2208,15 +2171,6 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; - // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. - // Setting this to "VIP" indicates that traffic is delivered to the node with - // the destination set to the load-balancer's IP and port. - // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with - // the destination set to the node's IP and node port or the pod's IP and port. - // Service implementations may use this information to adjust traffic routing. 
- // +optional - optional string ipMode = 3; - // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -2257,24 +2211,6 @@ message LocalVolumeSource { optional string fsType = 2; } -// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation -message ModifyVolumeStatus { - // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled - optional string targetVolumeAttributesClassName = 1; - - // status is the status of the ControllerModifyVolume operation. It can be in any of following states: - // - Pending - // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as - // the specified VolumeAttributesClass not existing. - // - InProgress - // InProgress indicates that the volume is being modified. - // - Infeasible - // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To - // resolve the error, a valid VolumeAttributesClass needs to be specified. - // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. - optional string status = 2; -} - // Represents an NFS mount that lasts the lifetime of a pod. // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { @@ -2880,7 +2816,7 @@ message PersistentVolumeClaimSpec { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - optional VolumeResourceRequirements resources = 2; + optional ResourceRequirements resources = 2; // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional @@ -2932,22 +2868,6 @@ message PersistentVolumeClaimSpec { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. // +optional optional TypedObjectReference dataSourceRef = 8; - - // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - // If specified, the CSI driver will create or update the volume with the attributes defined - // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. - // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - // exists. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. - // +featureGate=VolumeAttributesClass - // +optional - optional string volumeAttributesClassName = 9; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. 
@@ -3037,20 +2957,6 @@ message PersistentVolumeClaimStatus { // +mapType=granular // +optional map allocatedResourceStatuses = 7; - - // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. - // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - optional string currentVolumeAttributesClassName = 8; - - // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. - // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - optional ModifyVolumeStatus modifyVolumeStatus = 9; } // PersistentVolumeClaimTemplate is used to produce @@ -3255,17 +3161,6 @@ message PersistentVolumeSpec { // This field influences the scheduling of pods that use this volume. // +optional optional VolumeNodeAffinity nodeAffinity = 9; - - // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value - // is not allowed. When this field is not set, it indicates that this volume does not belong to any - // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver - // after a volume has been updated successfully to a new class. - // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound - // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - optional string volumeAttributesClassName = 10; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -3286,7 +3181,7 @@ message PersistentVolumeStatus { // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). + // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4; @@ -3358,7 +3253,6 @@ message PodAffinity { // a pod of the set of pods is running message PodAffinityTerm { // A label query over a set of resources, in this case pods. - // If it's null, this PodAffinityTerm matches with no Pods. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; @@ -3383,32 +3277,6 @@ message PodAffinityTerm { // An empty selector ({}) matches all namespaces. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4; - - // MatchLabelKeys is a set of pod label keys to select which pods will - // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - // to select the group of existing pods which pods will be taken into consideration - // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. 
- // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - // +listType=atomic - // +optional - repeated string matchLabelKeys = 5; - - // MismatchLabelKeys is a set of pod label keys to select which pods will - // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - // to select the group of existing pods which pods will be taken into consideration - // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - // +listType=atomic - // +optional - repeated string mismatchLabelKeys = 6; } // Pod anti affinity is a group of inter pod anti affinity scheduling rules. @@ -5382,7 +5250,7 @@ message ServicePort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5685,12 +5553,6 @@ message SessionAffinityConfig { optional ClientIPConfig clientIP = 1; } -// SleepAction describes a "sleep" action. -message SleepAction { - // Seconds is the number of seconds to sleep. - optional int64 seconds = 1; -} - // Represents a StorageOS persistent volume resource. message StorageOSPersistentVolumeSource { // volumeName is the human-readable name of the StorageOS volume. Volume @@ -6098,39 +5960,6 @@ message VolumeProjection { // serviceAccountToken is information about the serviceAccountToken data to project // +optional optional ServiceAccountTokenProjection serviceAccountToken = 4; - - // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - // of ClusterTrustBundle objects in an auto-updating file. - // - // Alpha, gated by the ClusterTrustBundleProjection feature gate. - // - // ClusterTrustBundle objects can either be selected by name, or by the - // combination of signer name and a label selector. - // - // Kubelet performs aggressive normalization of the PEM contents written - // into the pod filesystem. Esoteric PEM features such as inter-block - // comments and block headers are stripped. Certificates are deduplicated. - // The ordering of certificates within the file is arbitrary, and Kubelet - // may change the order over time. - // - // +featureGate=ClusterTrustBundleProjection - // +optional - optional ClusterTrustBundleProjection clusterTrustBundle = 5; -} - -// VolumeResourceRequirements describes the storage resource requirements for a volume. -message VolumeResourceRequirements { - // Limits describes the maximum amount of compute resources allowed. 
- // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - map limits = 1; - - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. Requests cannot exceed Limits. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - map requests = 2; } // Represents the source of a volume to mount. diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 61ba21bcad4..9e05c223565 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -363,16 +363,6 @@ type PersistentVolumeSpec struct { // This field influences the scheduling of pods that use this volume. // +optional NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` - // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value - // is not allowed. When this field is not set, it indicates that this volume does not belong to any - // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver - // after a volume has been updated successfully to a new class. - // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound - // PersistentVolumeClaims during the binding process. - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"` } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -423,7 +413,7 @@ type PersistentVolumeStatus struct { Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` // lastPhaseTransitionTime is the time the phase transitioned from one to another // and automatically resets to current time everytime a volume phase transitions. - // This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default). + // This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature. // +featureGate=PersistentVolumeLastPhaseTransitionTime // +optional LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"` @@ -496,7 +486,7 @@ type PersistentVolumeClaimSpec struct { // status field of the claim. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources // +optional - Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // volumeName is the binding reference to the PersistentVolume backing this claim. // +optional VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` @@ -543,21 +533,6 @@ type PersistentVolumeClaimSpec struct { // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. 
// +optional DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"` - // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. - // If specified, the CSI driver will create or update the volume with the attributes defined - // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - // will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - // will be set by the persistentvolume controller if it exists. - // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be - // set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource - // exists. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass - // (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. - // +featureGate=VolumeAttributesClass - // +optional - VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"` } type TypedObjectReference struct { @@ -586,11 +561,6 @@ const ( PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" - - // Applying the target VolumeAttributesClass encountered an error - PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError" - // Volume is being modified - PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume" ) // +enum @@ -617,38 +587,6 @@ const ( PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed" ) -// +enum -// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately -type PersistentVolumeClaimModifyVolumeStatus string - -const ( - // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as - // the specified VolumeAttributesClass not existing - PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending" - // InProgress indicates that the volume is being modified - PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress" - // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To - // resolve the error, a valid VolumeAttributesClass needs to be specified - PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible" -) - -// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation -type ModifyVolumeStatus struct { - // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled - TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"` - // status is the status of the ControllerModifyVolume operation. 
It can be in any of following states: - // - Pending - // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as - // the specified VolumeAttributesClass not existing. - // - InProgress - // InProgress indicates that the volume is being modified. - // - Infeasible - // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To - // resolve the error, a valid VolumeAttributesClass needs to be specified. - // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately. - Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"` -} - // PersistentVolumeClaimCondition contains details about state of pvc type PersistentVolumeClaimCondition struct { Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` @@ -755,18 +693,6 @@ type PersistentVolumeClaimStatus struct { // +mapType=granular // +optional AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"` - // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. - // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"` - // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. - // When this is unset, there is no ModifyVolume operation being attempted. - // This is an alpha field and requires enabling VolumeAttributesClass feature. - // +featureGate=VolumeAttributesClass - // +optional - ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"` } // +enum @@ -1837,40 +1763,6 @@ type ServiceAccountTokenProjection struct { Path string `json:"path" protobuf:"bytes,3,opt,name=path"` } -// ClusterTrustBundleProjection describes how to select a set of -// ClusterTrustBundle objects and project their contents into the pod -// filesystem. -type ClusterTrustBundleProjection struct { - // Select a single ClusterTrustBundle by object name. Mutually-exclusive - // with signerName and labelSelector. - // +optional - Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"` - - // Select all ClusterTrustBundles that match this signer name. - // Mutually-exclusive with name. The contents of all selected - // ClusterTrustBundles will be unified and deduplicated. - // +optional - SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"` - - // Select all ClusterTrustBundles that match this label selector. Only has - // effect if signerName is set. Mutually-exclusive with name. If unset, - // interpreted as "match nothing". If set but empty, interpreted as "match - // everything". - // +optional - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"` - - // If true, don't block pod startup if the referenced ClusterTrustBundle(s) - // aren't available. 
If using name, then the named ClusterTrustBundle is - // allowed not to exist. If using signerName, then the combination of - // signerName and labelSelector is allowed to match zero - // ClusterTrustBundles. - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"` - - // Relative path from the volume root to write the bundle. - Path string `json:"path" protobuf:"bytes,4,rep,name=path"` -} - // Represents a projected volume source type ProjectedVolumeSource struct { // sources is the list of volume projections @@ -1902,24 +1794,6 @@ type VolumeProjection struct { // serviceAccountToken is information about the serviceAccountToken data to project // +optional ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` - - // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field - // of ClusterTrustBundle objects in an auto-updating file. - // - // Alpha, gated by the ClusterTrustBundleProjection feature gate. - // - // ClusterTrustBundle objects can either be selected by name, or by the - // combination of signer name and a label selector. - // - // Kubelet performs aggressive normalization of the PEM contents written - // into the pod filesystem. Esoteric PEM features such as inter-block - // comments and block headers are stripped. Certificates are deduplicated. - // The ordering of certificates within the file is arbitrary, and Kubelet - // may change the order over time. - // - // +featureGate=ClusterTrustBundleProjection - // +optional - ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"` } const ( @@ -2020,8 +1894,10 @@ type CSIPersistentVolumeSource struct { // nodeExpandSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodeExpandVolume call. + // This is a beta field which is enabled default by CSINodeExpandSecret feature gate. // This field is optional, may be omitted if no secret is required. If the // secret object contains more than one secret, all secrets are passed. + // +featureGate=CSINodeExpandSecret // +optional NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"` } @@ -2396,12 +2272,6 @@ type ExecAction struct { Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` } -// SleepAction describes a "sleep" action. -type SleepAction struct { - // Seconds is the number of seconds to sleep. - Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"` -} - // Probe describes a health check to be performed against a container to determine whether it is // alive or ready to receive traffic. type Probe struct { @@ -2547,27 +2417,6 @@ type ResourceRequirements struct { Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` } -// VolumeResourceRequirements describes the storage resource requirements for a volume. -type VolumeResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` - // Requests describes the minimum amount of compute resources required. 
- // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. Requests cannot exceed Limits. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - // +optional - Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` - - // Claims got added by accident when volumes shared the ResourceRequirements struct - // with containers. Stripping the field got added in 1.27 and was backported to 1.26. - // Starting with Kubernetes 1.28, this field is not part of the volume API anymore. - // - // Future extensions must not use "claims" or field number 3. - // Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"` -} - // ResourceClaim references one entry in PodSpec.ResourceClaims. type ResourceClaim struct { // Name must match the name of one entry in pod.spec.resourceClaims of @@ -2797,10 +2646,6 @@ type LifecycleHandler struct { // lifecycle hooks will fail in runtime when tcp handler is specified. // +optional TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` - // Sleep represents the duration that the container should sleep before being terminated. - // +featureGate=PodLifecycleSleepAction - // +optional - Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"` } // Lifecycle describes actions that the management system should take in response to container lifecycle @@ -3000,9 +2845,6 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" - // PodReadyToStartContainers pod sandbox is successfully configured and - // the pod is ready to launch containers. - PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -3294,7 +3136,6 @@ type WeightedPodAffinityTerm struct { // a pod of the set of pods is running type PodAffinityTerm struct { // A label query over a set of resources, in this case pods. - // If it's null, this PodAffinityTerm matches with no Pods. // +optional LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` // namespaces specifies a static list of namespace names that the term applies to. @@ -3316,30 +3157,6 @@ type PodAffinityTerm struct { // An empty selector ({}) matches all namespaces. // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"` - // MatchLabelKeys is a set of pod label keys to select which pods will - // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` - // to select the group of existing pods which pods will be taken into consideration - // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. - // Also, MatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
- // +listType=atomic - // +optional - MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"` - // MismatchLabelKeys is a set of pod label keys to select which pods will - // be taken into consideration. The keys are used to lookup values from the - // incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` - // to select the group of existing pods which pods will be taken into consideration - // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming - // pod labels will be ignored. The default value is empty. - // The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. - // Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. - // This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. - // +listType=atomic - // +optional - MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"` } // Node affinity is a group of node affinity scheduling rules. @@ -4875,15 +4692,6 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` - // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. - // Setting this to "VIP" indicates that traffic is delivered to the node with - // the destination set to the load-balancer's IP and port. - // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with - // the destination set to the node's IP and node port or the pod's IP and port. - // Service implementations may use this information to adjust traffic routing. - // +optional - IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` - // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -4901,8 +4709,6 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" - // IPFamilyUnknown indicates that this IP is unknown protocol - IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -5197,7 +5003,7 @@ type ServicePort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -5441,7 +5247,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // @@ -7248,15 +7054,3 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } - -// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP -type LoadBalancerIPMode string - -const ( - // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with - // the destination set to the load-balancer's IP and port. - LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" - // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with - // the destination set to the node's IP and port or the pod's IP and port. - LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" -) diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index fd6f7dc61b9..9734d8b41eb 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -127,7 +127,7 @@ var map_CSIPersistentVolumeSource = map[string]string{ "nodeStageSecretRef": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", "controllerExpandSecretRef": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeExpandSecretRef": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -228,19 +228,6 @@ func (ClientIPConfig) SwaggerDoc() map[string]string { return map_ClientIPConfig } -var map_ClusterTrustBundleProjection = map[string]string{ - "": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", - "name": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", - "signerName": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", - "labelSelector": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", - "optional": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", - "path": "Relative path from the volume root to write the bundle.", -} - -func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string { - return map_ClusterTrustBundleProjection -} - var map_ComponentCondition = map[string]string{ "": "Information about the condition of a component.", "type": "Type of condition for a component. Valid value: \"Healthy\"", @@ -544,7 +531,7 @@ var map_EndpointPort = map[string]string{ "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", "port": "The port number of the endpoint.", "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -948,7 +935,6 @@ var map_LifecycleHandler = map[string]string{ "exec": "Exec specifies the action to take.", "httpGet": "HTTPGet specifies the http request to perform.", "tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified.", - "sleep": "Sleep represents the duration that the container should sleep before being terminated.", } func (LifecycleHandler) SwaggerDoc() map[string]string { @@ -1002,7 +988,6 @@ var map_LoadBalancerIngress = map[string]string{ "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", - "ipMode": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.", "ports": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", } @@ -1038,16 +1023,6 @@ func (LocalVolumeSource) SwaggerDoc() map[string]string { return map_LocalVolumeSource } -var map_ModifyVolumeStatus = map[string]string{ - "": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", - "targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", - "status": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. 
Consumers should check for unknown statuses and fail appropriately.", -} - -func (ModifyVolumeStatus) SwaggerDoc() map[string]string { - return map_ModifyVolumeStatus -} - var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", "server": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", @@ -1364,16 +1339,15 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "selector": "selector is a label query over volumes to consider for binding.", - "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", - "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", - "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", - "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. 
There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", - "volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "selector is a label query over volumes to consider for binding.", + "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "dataSource": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. 
If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "dataSourceRef": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1381,15 +1355,13 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { } var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "capacity": "capacity represents the actual resources of the underlying volume.", - "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. 
If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", - "currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", - "modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. 
This is an alpha field and requires enabling VolumeAttributesClass feature.", + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "capacity represents the actual resources of the underlying volume.", + "conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. 
Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", } func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { @@ -1466,7 +1438,6 @@ var map_PersistentVolumeSpec = map[string]string{ "mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", - "volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1478,7 +1449,7 @@ var map_PersistentVolumeStatus = map[string]string{ "phase": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", "message": "message is a human-readable message indicating details about why the volume is in this state.", "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. 
This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.", } func (PersistentVolumeStatus) SwaggerDoc() map[string]string { @@ -1518,12 +1489,10 @@ func (PodAffinity) SwaggerDoc() map[string]string { var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", + "labelSelector": "A label query over a set of resources, in this case pods.", "namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", - "matchLabelKeys": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", - "mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -2356,7 +2325,7 @@ var map_ServicePort = map[string]string{ "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. 
All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", "port": "The port that will be exposed by this service.", "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", "nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", @@ -2421,15 +2390,6 @@ func (SessionAffinityConfig) SwaggerDoc() map[string]string { return map_SessionAffinityConfig } -var map_SleepAction = map[string]string{ - "": "SleepAction describes a \"sleep\" action.", - "seconds": "Seconds is the number of seconds to sleep.", -} - -func (SleepAction) SwaggerDoc() map[string]string { - return map_SleepAction -} - var map_StorageOSPersistentVolumeSource = map[string]string{ "": "Represents a StorageOS persistent volume resource.", "volumeName": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", @@ -2606,23 +2566,12 @@ var map_VolumeProjection = map[string]string{ "downwardAPI": "downwardAPI information about the downwardAPI data to project", "configMap": "configMap information about the configMap data to project", "serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project", - "clusterTrustBundle": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.", } func (VolumeProjection) SwaggerDoc() map[string]string { return map_VolumeProjection } -var map_VolumeResourceRequirements = map[string]string{ - "": "VolumeResourceRequirements describes the storage resource requirements for a volume.", - "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", -} - -func (VolumeResourceRequirements) SwaggerDoc() map[string]string { - return map_VolumeResourceRequirements -} - var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", "hostPath": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 45172e0e233..d76f0bbbcf7 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -466,42 +466,6 @@ func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
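For readers of this vendored hunk, a minimal sketch of how the appProtocol hint documented for ServicePort above is typically set from Go, using the vendored k8s.io/api/core/v1 types; the port name, number and the h2c value are illustrative assumptions, not taken from this patch.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// AppProtocol is a *string hint; "kubernetes.io/h2c" is one of the
	// Kubernetes-defined prefixed names listed in the field documentation.
	h2c := "kubernetes.io/h2c"

	port := corev1.ServicePort{
		Name:        "grpc", // must be a DNS_LABEL, unique within the Service
		Protocol:    corev1.ProtocolTCP,
		Port:        8080,
		AppProtocol: &h2c,
	}
	fmt.Printf("%s/%d appProtocol=%s\n", port.Name, port.Port, *port.AppProtocol)
}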
-func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) { - *out = *in - if in.Name != nil { - in, out := &in.Name, &out.Name - *out = new(string) - **out = **in - } - if in.SignerName != nil { - in, out := &in.SignerName, &out.SignerName - *out = new(string) - **out = **in - } - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection. -func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection { - if in == nil { - return nil - } - out := new(ClusterTrustBundleProjection) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { *out = *in @@ -2081,11 +2045,6 @@ func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) { *out = new(TCPSocketAction) **out = **in } - if in.Sleep != nil { - in, out := &in.Sleep, &out.Sleep - *out = new(SleepAction) - **out = **in - } return } @@ -2269,11 +2228,6 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in - if in.IPMode != nil { - in, out := &in.IPMode, &out.IPMode - *out = new(LoadBalancerIPMode) - **out = **in - } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -2354,22 +2308,6 @@ func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus. -func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus { - if in == nil { - return nil - } - out := new(ModifyVolumeStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
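As an aside on the generated code above, every DeepCopyInto/DeepCopy pair follows the same hand-writable pattern: copy the value, then re-allocate each pointer, slice and map so the copy shares no memory with the original. A minimal sketch with a hypothetical Widget type (not part of the Kubernetes API):

package main

import "fmt"

// Widget is a hypothetical struct used only to illustrate the pattern that
// deepcopy-gen emits for the API types in this file.
type Widget struct {
	Name   string
	Labels map[string]string
	Limit  *int32
}

// DeepCopyInto copies the receiver into out; in must be non-nil.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in // copies plain value fields such as Name
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
	if in.Limit != nil {
		limit := *in.Limit
		out.Limit = &limit
	}
}

// DeepCopy allocates a new Widget and copies the receiver into it.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	limit := int32(3)
	a := &Widget{Name: "a", Labels: map[string]string{"app": "demo"}, Limit: &limit}
	b := a.DeepCopy()
	b.Labels["app"] = "changed" // does not affect a
	fmt.Println(a.Labels["app"], b.Labels["app"]) // demo changed
}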
func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { *out = *in @@ -3118,11 +3056,6 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec *out = new(TypedObjectReference) (*in).DeepCopyInto(*out) } - if in.VolumeAttributesClassName != nil { - in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName - *out = new(string) - **out = **in - } return } @@ -3172,16 +3105,6 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt (*out)[key] = val } } - if in.CurrentVolumeAttributesClassName != nil { - in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName - *out = new(string) - **out = **in - } - if in.ModifyVolumeStatus != nil { - in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus - *out = new(ModifyVolumeStatus) - **out = **in - } return } @@ -3424,11 +3347,6 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = new(VolumeNodeAffinity) (*in).DeepCopyInto(*out) } - if in.VolumeAttributesClassName != nil { - in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName - *out = new(string) - **out = **in - } return } @@ -3554,16 +3472,6 @@ func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } - if in.MatchLabelKeys != nil { - in, out := &in.MatchLabelKeys, &out.MatchLabelKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.MismatchLabelKeys != nil { - in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -5773,22 +5681,6 @@ func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SleepAction) DeepCopyInto(out *SleepAction) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction. -func (in *SleepAction) DeepCopy() *SleepAction { - if in == nil { - return nil - } - out := new(SleepAction) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { *out = *in @@ -6135,11 +6027,6 @@ func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { *out = new(ServiceAccountTokenProjection) (*in).DeepCopyInto(*out) } - if in.ClusterTrustBundle != nil { - in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle - *out = new(ClusterTrustBundleProjection) - (*in).DeepCopyInto(*out) - } return } @@ -6153,36 +6040,6 @@ func (in *VolumeProjection) DeepCopy() *VolumeProjection { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements. 
-func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements { - if in == nil { - return nil - } - out := new(VolumeResourceRequirements) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = *in diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto index 6d234017b72..490ce892247 100644 --- a/vendor/k8s.io/api/discovery/v1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1/generated.proto @@ -118,7 +118,7 @@ message EndpointHints { // +structType=atomic message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -145,7 +145,7 @@ message EndpointPort { // RFC-6335 and https://www.iana.org/assignments/service-names). // // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go index 7ebb07ca359..efbb09918c2 100644 --- a/vendor/k8s.io/api/discovery/v1/types.go +++ b/vendor/k8s.io/api/discovery/v1/types.go @@ -168,7 +168,7 @@ type ForZone struct { // +structType=atomic type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. @@ -195,7 +195,7 @@ type EndpointPort struct { // RFC-6335 and https://www.iana.org/assignments/service-names). 
// // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 // diff --git a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go index 41c3060568f..bef7745398a 100644 --- a/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go @@ -65,10 +65,10 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", - "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", } func (EndpointPort) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto index ec555a40b3a..8b6c360b0e6 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto +++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto @@ -119,7 +119,7 @@ message EndpointHints { // EndpointPort represents a Port used by an EndpointSlice message EndpointPort { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go index defd8e2ce69..f09f7f320cd 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types.go @@ -172,7 +172,7 @@ type ForZone struct { // EndpointPort represents a Port used by an EndpointSlice type EndpointPort struct { // name represents the name of this port. All ports in an EndpointSlice must have a unique name. - // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. + // If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. // Name must either be an empty string or pass DNS_LABEL validation: // * must be no more than 63 characters long. // * must consist of lower case alphanumeric characters or '-'. diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go index 847d4d58e06..b1d4c306ccd 100644 --- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go @@ -64,7 +64,7 @@ func (EndpointHints) SwaggerDoc() map[string]string { var map_EndpointPort = map[string]string{ "": "EndpointPort represents a Port used by an EndpointSlice", - "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "name": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. 
If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", "protocol": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", "port": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", "appProtocol": "appProtocol represents the application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.", diff --git a/vendor/k8s.io/api/flowcontrol/v1/doc.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go similarity index 73% rename from vendor/k8s.io/api/flowcontrol/v1/doc.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go index 1bc51d40665..a3d4d0c6075 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/doc.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,8 +17,9 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:protobuf-gen=package // +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true // +groupName=flowcontrol.apiserver.k8s.io -// Package v1 holds api types of version v1 for group "flowcontrol.apiserver.k8s.io". -package v1 // import "k8s.io/api/flowcontrol/v1" +// Package v1alpha1 holds api types of version v1alpha1 for group "flowcontrol.apiserver.k8s.io". +package v1alpha1 // import "k8s.io/api/flowcontrol/v1alpha1" diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go similarity index 91% rename from vendor/k8s.io/api/flowcontrol/v1/generated.pb.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go index c235ba10dee..b54e1ceefbb 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go @@ -15,9 +15,9 @@ limitations under the License. */ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto +// source: k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto -package v1 +package v1alpha1 import ( fmt "fmt" @@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} } func (*ExemptPriorityLevelConfiguration) ProtoMessage() {} func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{0} + return fileDescriptor_45ba024d525b289b, []int{0} } func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -74,7 +74,7 @@ var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} } func (*FlowDistinguisherMethod) ProtoMessage() {} func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{1} + return fileDescriptor_45ba024d525b289b, []int{1} } func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -102,7 +102,7 @@ var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo func (m *FlowSchema) Reset() { *m = FlowSchema{} } func (*FlowSchema) ProtoMessage() {} func (*FlowSchema) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{2} + return fileDescriptor_45ba024d525b289b, []int{2} } func (m *FlowSchema) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -130,7 +130,7 @@ var xxx_messageInfo_FlowSchema proto.InternalMessageInfo func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} } func (*FlowSchemaCondition) ProtoMessage() {} func (*FlowSchemaCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{3} + return fileDescriptor_45ba024d525b289b, []int{3} } func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} } func (*FlowSchemaList) ProtoMessage() {} func (*FlowSchemaList) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{4} + return fileDescriptor_45ba024d525b289b, []int{4} } func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -186,7 +186,7 @@ var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} } func (*FlowSchemaSpec) ProtoMessage() {} func (*FlowSchemaSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{5} + return fileDescriptor_45ba024d525b289b, []int{5} } func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -214,7 +214,7 @@ var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} } func (*FlowSchemaStatus) ProtoMessage() {} func (*FlowSchemaStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{6} + return fileDescriptor_45ba024d525b289b, []int{6} } func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -242,7 +242,7 @@ var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo func (m *GroupSubject) Reset() { *m = GroupSubject{} } func (*GroupSubject) ProtoMessage() {} func (*GroupSubject) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_f8a25df358697d27, []int{7} + return fileDescriptor_45ba024d525b289b, []int{7} } func (m *GroupSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -270,7 +270,7 @@ var xxx_messageInfo_GroupSubject proto.InternalMessageInfo func (m *LimitResponse) Reset() { *m = LimitResponse{} } func (*LimitResponse) ProtoMessage() {} func (*LimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{8} + return fileDescriptor_45ba024d525b289b, []int{8} } func (m *LimitResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -298,7 +298,7 @@ var xxx_messageInfo_LimitResponse proto.InternalMessageInfo func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} } func (*LimitedPriorityLevelConfiguration) ProtoMessage() {} func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{9} + return fileDescriptor_45ba024d525b289b, []int{9} } func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -326,7 +326,7 @@ var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} } func (*NonResourcePolicyRule) ProtoMessage() {} func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{10} + return fileDescriptor_45ba024d525b289b, []int{10} } func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -354,7 +354,7 @@ var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} } func (*PolicyRulesWithSubjects) ProtoMessage() {} func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{11} + return fileDescriptor_45ba024d525b289b, []int{11} } func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,7 +382,7 @@ var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} } func (*PriorityLevelConfiguration) ProtoMessage() {} func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{12} + return fileDescriptor_45ba024d525b289b, []int{12} } func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -410,7 +410,7 @@ var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} } func (*PriorityLevelConfigurationCondition) ProtoMessage() {} func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{13} + return fileDescriptor_45ba024d525b289b, []int{13} } func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -438,7 +438,7 @@ var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInf func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} } func (*PriorityLevelConfigurationList) ProtoMessage() {} func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{14} + return fileDescriptor_45ba024d525b289b, []int{14} } func (m *PriorityLevelConfigurationList) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -466,7 +466,7 @@ var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} } func (*PriorityLevelConfigurationReference) ProtoMessage() {} func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{15} + return fileDescriptor_45ba024d525b289b, []int{15} } func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -494,7 +494,7 @@ var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInf func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} } func (*PriorityLevelConfigurationSpec) ProtoMessage() {} func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{16} + return fileDescriptor_45ba024d525b289b, []int{16} } func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,7 +522,7 @@ var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} } func (*PriorityLevelConfigurationStatus) ProtoMessage() {} func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{17} + return fileDescriptor_45ba024d525b289b, []int{17} } func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -550,7 +550,7 @@ var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} } func (*QueuingConfiguration) ProtoMessage() {} func (*QueuingConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{18} + return fileDescriptor_45ba024d525b289b, []int{18} } func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -578,7 +578,7 @@ var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} } func (*ResourcePolicyRule) ProtoMessage() {} func (*ResourcePolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{19} + return fileDescriptor_45ba024d525b289b, []int{19} } func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -606,7 +606,7 @@ var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} } func (*ServiceAccountSubject) ProtoMessage() {} func (*ServiceAccountSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{20} + return fileDescriptor_45ba024d525b289b, []int{20} } func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -634,7 +634,7 @@ var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{21} + return fileDescriptor_45ba024d525b289b, []int{21} } func (m *Subject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -662,7 +662,7 @@ var xxx_messageInfo_Subject proto.InternalMessageInfo func (m *UserSubject) Reset() { *m = UserSubject{} } func (*UserSubject) 
ProtoMessage() {} func (*UserSubject) Descriptor() ([]byte, []int) { - return fileDescriptor_f8a25df358697d27, []int{22} + return fileDescriptor_45ba024d525b289b, []int{22} } func (m *UserSubject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -688,137 +688,139 @@ func (m *UserSubject) XXX_DiscardUnknown() { var xxx_messageInfo_UserSubject proto.InternalMessageInfo func init() { - proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration") - proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod") - proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema") - proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition") - proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList") - proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec") - proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus") - proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject") - proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse") - proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration") - proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule") - proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects") - proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration") - proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition") - proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList") - proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference") - proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec") - proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus") - proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration") - proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule") - proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject") - proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject") - proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject") + proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration") + proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowDistinguisherMethod") + proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchema") + proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaCondition") + proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaList") + proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaSpec") + proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.FlowSchemaStatus") + proto.RegisterType((*GroupSubject)(nil), 
"k8s.io.api.flowcontrol.v1alpha1.GroupSubject") + proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitResponse") + proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration") + proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.NonResourcePolicyRule") + proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects") + proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfiguration") + proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition") + proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationList") + proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference") + proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec") + proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus") + proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1alpha1.QueuingConfiguration") + proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1alpha1.ResourcePolicyRule") + proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.ServiceAccountSubject") + proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1alpha1.Subject") + proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1alpha1.UserSubject") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_f8a25df358697d27) -} - -var fileDescriptor_f8a25df358697d27 = []byte{ - // 1588 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x73, 0x1b, 0xc5, - 0x16, 0xb6, 0x64, 0xc9, 0xb6, 0x8e, 0x9f, 0x69, 0xc7, 0x65, 0xc5, 0xb9, 0x25, 0x39, 0x73, 0xeb, - 0xe6, 0x71, 0x43, 0xa4, 0xc4, 0x45, 0x20, 0xa9, 0x00, 0xa9, 0x4c, 0x12, 0xf2, 0xb2, 0x1d, 0xa7, - 0x95, 0x07, 0x15, 0xa8, 0x82, 0xd1, 0xa8, 0x2d, 0x4d, 0x2c, 0xcd, 0x0c, 0xdd, 0x33, 0x32, 0xa6, - 0x8a, 0x2a, 0x7e, 0x42, 0x56, 0x2c, 0x59, 0xc0, 0x3f, 0x60, 0x45, 0xc1, 0x86, 0x65, 0x76, 0x64, - 0x19, 0x58, 0xa8, 0x88, 0xf8, 0x0b, 0x2c, 0x20, 0x2b, 0xaa, 0x7b, 0x7a, 0x66, 0x34, 0x92, 0x66, - 0xac, 0xf2, 0x22, 0x6c, 0xd8, 0x79, 0xce, 0xf9, 0xce, 0x77, 0xba, 0x4f, 0x9f, 0x97, 0x0c, 0xea, - 0xce, 0x05, 0x56, 0x32, 0xac, 0xf2, 0x8e, 0x5b, 0x25, 0xd4, 0x24, 0x0e, 0x61, 0xe5, 0x36, 0x31, - 0x6b, 0x16, 0x2d, 0x4b, 0x85, 0x66, 0x1b, 0xe5, 0xed, 0xa6, 0xb5, 0xab, 0x5b, 0xa6, 0x43, 0xad, - 0x66, 0xb9, 0x7d, 0xae, 0x5c, 0x27, 0x26, 0xa1, 0x9a, 0x43, 0x6a, 0x25, 0x9b, 0x5a, 0x8e, 0x85, - 0x8e, 0x78, 0xd0, 0x92, 0x66, 0x1b, 0xa5, 0x1e, 0x68, 0xa9, 0x7d, 0x6e, 0xe5, 0x4c, 0xdd, 0x70, - 0x1a, 0x6e, 0xb5, 0xa4, 0x5b, 0xad, 0x72, 0xdd, 0xaa, 0x5b, 0x65, 0x61, 0x51, 0x75, 0xb7, 0xc5, - 0x97, 0xf8, 0x10, 0x7f, 0x79, 0x4c, 0x2b, 0x6f, 0x86, 0x4e, 0x5b, 0x9a, 0xde, 0x30, 0x4c, 0x42, - 0xf7, 0xca, 0xf6, 0x4e, 0x9d, 0x0b, 0x58, 0xb9, 0x45, 0x1c, 0x6d, 0x88, 0xff, 0x95, 0x72, 0x9c, - 0x15, 0x75, 0x4d, 0xc7, 0x68, 0x91, 0x01, 0x83, 0xb7, 0xf6, 0x33, 0x60, 0x7a, 0x83, 0xb4, 0xb4, - 0x7e, 0x3b, 0xe5, 0xc7, 0x14, 0xac, 0x5e, 0xff, 0x8c, 0xb4, 0x6c, 0x67, 0x8b, 
0x1a, 0x16, 0x35, - 0x9c, 0xbd, 0x75, 0xd2, 0x26, 0xcd, 0xab, 0x96, 0xb9, 0x6d, 0xd4, 0x5d, 0xaa, 0x39, 0x86, 0x65, - 0xa2, 0x0f, 0x20, 0x6f, 0x5a, 0x2d, 0xc3, 0xd4, 0xb8, 0x5c, 0x77, 0x29, 0x25, 0xa6, 0xbe, 0x57, - 0x69, 0x68, 0x94, 0xb0, 0x7c, 0x6a, 0x35, 0x75, 0x32, 0xab, 0xfe, 0xa7, 0xdb, 0x29, 0xe6, 0x37, - 0x63, 0x30, 0x38, 0xd6, 0x1a, 0xbd, 0x0b, 0xf3, 0x4d, 0x62, 0xd6, 0xb4, 0x6a, 0x93, 0x6c, 0x11, - 0xaa, 0x13, 0xd3, 0xc9, 0xa7, 0x05, 0xe1, 0x62, 0xb7, 0x53, 0x9c, 0x5f, 0x8f, 0xaa, 0x70, 0x3f, - 0x56, 0x79, 0x0c, 0xcb, 0xef, 0x37, 0xad, 0xdd, 0x6b, 0x06, 0x73, 0x0c, 0xb3, 0xee, 0x1a, 0xac, - 0x41, 0xe8, 0x06, 0x71, 0x1a, 0x56, 0x0d, 0x5d, 0x86, 0x8c, 0xb3, 0x67, 0x13, 0x71, 0xbe, 0x9c, - 0x7a, 0xfa, 0x59, 0xa7, 0x38, 0xd6, 0xed, 0x14, 0x33, 0xf7, 0xf7, 0x6c, 0xf2, 0xaa, 0x53, 0x3c, - 0x1a, 0x63, 0xc6, 0xd5, 0x58, 0x18, 0x2a, 0x4f, 0xd3, 0x00, 0x1c, 0x55, 0x11, 0x81, 0x43, 0x9f, - 0xc0, 0x14, 0x7f, 0xac, 0x9a, 0xe6, 0x68, 0x82, 0x73, 0x7a, 0xed, 0x6c, 0x29, 0x4c, 0x92, 0x20, - 0xe6, 0x25, 0x7b, 0xa7, 0xce, 0x05, 0xac, 0xc4, 0xd1, 0xa5, 0xf6, 0xb9, 0xd2, 0xdd, 0xea, 0x13, - 0xa2, 0x3b, 0x1b, 0xc4, 0xd1, 0x54, 0x24, 0x4f, 0x01, 0xa1, 0x0c, 0x07, 0xac, 0xe8, 0x0e, 0x64, - 0x98, 0x4d, 0x74, 0x11, 0x80, 0xe9, 0xb5, 0x53, 0xa5, 0xd8, 0x14, 0x2c, 0x85, 0xc7, 0xaa, 0xd8, - 0x44, 0x57, 0x67, 0xfc, 0xcb, 0xf1, 0x2f, 0x2c, 0x48, 0x50, 0x05, 0x26, 0x98, 0xa3, 0x39, 0x2e, - 0xcb, 0x8f, 0x0b, 0xba, 0xd3, 0xa3, 0xd1, 0x09, 0x13, 0x75, 0x4e, 0x12, 0x4e, 0x78, 0xdf, 0x58, - 0x52, 0x29, 0x2f, 0xd2, 0xb0, 0x18, 0x82, 0xaf, 0x5a, 0x66, 0xcd, 0x10, 0xf9, 0x71, 0x29, 0x12, - 0xeb, 0x13, 0x7d, 0xb1, 0x5e, 0x1e, 0x62, 0x12, 0xc6, 0x19, 0x5d, 0x0c, 0x4e, 0x9a, 0x16, 0xe6, - 0xc7, 0xa2, 0xce, 0x5f, 0x75, 0x8a, 0xf3, 0x81, 0x59, 0xf4, 0x3c, 0xa8, 0x0d, 0xa8, 0xa9, 0x31, - 0xe7, 0x3e, 0xd5, 0x4c, 0xe6, 0xd1, 0x1a, 0x2d, 0x22, 0x2f, 0xfc, 0xff, 0xd1, 0x5e, 0x87, 0x5b, - 0xa8, 0x2b, 0xd2, 0x25, 0x5a, 0x1f, 0x60, 0xc3, 0x43, 0x3c, 0xa0, 0xe3, 0x30, 0x41, 0x89, 0xc6, - 0x2c, 0x33, 0x9f, 0x11, 0x47, 0x0e, 0xe2, 0x85, 0x85, 0x14, 0x4b, 0x2d, 0x3a, 0x05, 0x93, 0x2d, - 0xc2, 0x98, 0x56, 0x27, 0xf9, 0xac, 0x00, 0xce, 0x4b, 0xe0, 0xe4, 0x86, 0x27, 0xc6, 0xbe, 0x5e, - 0xf9, 0x21, 0x05, 0x73, 0x61, 0x9c, 0xd6, 0x0d, 0xe6, 0xa0, 0x8f, 0x06, 0x32, 0xae, 0x34, 0xda, - 0x9d, 0xb8, 0xb5, 0xc8, 0xb7, 0x05, 0xe9, 0x6e, 0xca, 0x97, 0xf4, 0x64, 0xdb, 0x6d, 0xc8, 0x1a, - 0x0e, 0x69, 0xf1, 0xa8, 0x8f, 0x9f, 0x9c, 0x5e, 0xfb, 0xdf, 0x48, 0xf9, 0xa1, 0xce, 0x4a, 0xc6, - 0xec, 0x2d, 0x6e, 0x8b, 0x3d, 0x0a, 0xe5, 0x97, 0xf1, 0xde, 0xc3, 0xf3, 0x2c, 0x44, 0xdf, 0xa4, - 0x60, 0xc5, 0x8e, 0xed, 0x28, 0xf2, 0x3e, 0xef, 0x25, 0x38, 0x8d, 0x6f, 0x47, 0x98, 0x6c, 0x13, - 0xde, 0x43, 0x88, 0xaa, 0xc8, 0xd3, 0xac, 0x24, 0x80, 0x13, 0x4e, 0x81, 0x6e, 0x03, 0x6a, 0x69, - 0x0e, 0x8f, 0x63, 0x7d, 0x8b, 0x12, 0x9d, 0xd4, 0x38, 0xab, 0x6c, 0x40, 0x41, 0x4e, 0x6c, 0x0c, - 0x20, 0xf0, 0x10, 0x2b, 0xf4, 0x05, 0x2c, 0xd6, 0x06, 0xfb, 0x89, 0x4c, 0xc6, 0xb5, 0x7d, 0xa2, - 0x3b, 0xa4, 0x13, 0xa9, 0xcb, 0xdd, 0x4e, 0x71, 0x71, 0x88, 0x02, 0x0f, 0xf3, 0x83, 0x1e, 0x41, - 0x96, 0xba, 0x4d, 0xc2, 0xf2, 0x19, 0xf1, 0x9c, 0x49, 0x0e, 0xb7, 0xac, 0xa6, 0xa1, 0xef, 0x61, - 0x8e, 0x7e, 0x64, 0x38, 0x8d, 0x8a, 0x2b, 0x9a, 0x11, 0x0b, 0xdf, 0x56, 0xa8, 0xb0, 0xc7, 0xa7, - 0xb4, 0x61, 0xa1, 0xbf, 0x3f, 0xa0, 0x2a, 0x80, 0xee, 0x97, 0x24, 0x9f, 0x00, 0xe3, 0x7d, 0xb9, - 0x19, 0x9f, 0x40, 0x41, 0x25, 0x87, 0xbd, 0x30, 0x10, 0x31, 0xdc, 0xc3, 0xaa, 0x9c, 0x85, 0x99, - 0x1b, 0xd4, 0x72, 0x6d, 0x79, 0x3c, 0xb4, 0x0a, 0x19, 0x53, 0x6b, 0xf9, 0x3d, 0x26, 0x68, 0x79, - 0x9b, 
0x5a, 0x8b, 0x60, 0xa1, 0x51, 0xbe, 0x4e, 0xc1, 0xec, 0xba, 0xd1, 0x32, 0x1c, 0x4c, 0x98, - 0x6d, 0x99, 0x8c, 0xa0, 0xf3, 0x91, 0xbe, 0x74, 0xac, 0xaf, 0x2f, 0x1d, 0x8a, 0x80, 0x7b, 0x3a, - 0xd2, 0x43, 0x98, 0xfc, 0xd4, 0x25, 0xae, 0x61, 0xd6, 0x65, 0x2f, 0x2e, 0x27, 0xdc, 0xed, 0x9e, - 0x87, 0x8c, 0x24, 0x96, 0x3a, 0xcd, 0x6b, 0x5c, 0x6a, 0xb0, 0x4f, 0xa6, 0xfc, 0x91, 0x86, 0x63, - 0xc2, 0x27, 0xa9, 0xfd, 0x23, 0xc3, 0x96, 0xc0, 0x6c, 0xb3, 0xf7, 0xca, 0xf2, 0x76, 0x27, 0x13, - 0x6e, 0x17, 0x09, 0x91, 0xba, 0x24, 0x23, 0x18, 0x0d, 0x33, 0x8e, 0xb2, 0x0e, 0x9b, 0xe9, 0xe3, - 0xa3, 0xcf, 0x74, 0x74, 0x17, 0x96, 0xaa, 0x16, 0xa5, 0xd6, 0xae, 0x61, 0xd6, 0x85, 0x1f, 0x9f, - 0x24, 0x23, 0x48, 0x8e, 0x74, 0x3b, 0xc5, 0x25, 0x75, 0x18, 0x00, 0x0f, 0xb7, 0x53, 0x76, 0x61, - 0x69, 0x93, 0x77, 0x0d, 0x66, 0xb9, 0x54, 0x27, 0x61, 0xf6, 0xa3, 0x22, 0x64, 0xdb, 0x84, 0x56, - 0xbd, 0x0c, 0xce, 0xa9, 0x39, 0x9e, 0xfb, 0x0f, 0xb9, 0x00, 0x7b, 0x72, 0x7e, 0x13, 0x33, 0xb4, - 0x7c, 0x80, 0xd7, 0x59, 0x7e, 0x42, 0x40, 0xc5, 0x4d, 0x36, 0xa3, 0x2a, 0xdc, 0x8f, 0x55, 0x7e, - 0x4e, 0xc3, 0x72, 0x4c, 0xb1, 0xa1, 0x2d, 0x98, 0x62, 0xf2, 0x6f, 0x59, 0x40, 0x4a, 0xc2, 0x33, - 0x48, 0xb3, 0xb0, 0xa1, 0xfb, 0x3c, 0x38, 0x60, 0x41, 0x4f, 0x60, 0x96, 0x4a, 0xef, 0xc2, 0x9d, - 0x6c, 0xec, 0x67, 0x12, 0x68, 0x07, 0x63, 0x12, 0x3e, 0x31, 0xee, 0xe5, 0xc2, 0x51, 0x6a, 0xd4, - 0x86, 0x85, 0x9e, 0xcb, 0x7a, 0xee, 0xc6, 0x85, 0xbb, 0xb3, 0x09, 0xee, 0x86, 0xbe, 0x82, 0x9a, - 0x97, 0x1e, 0x17, 0x36, 0xfb, 0x18, 0xf1, 0x80, 0x0f, 0xe5, 0xa7, 0x34, 0x24, 0xf4, 0xfa, 0xd7, - 0xb0, 0xa3, 0x7d, 0x18, 0xd9, 0xd1, 0x2e, 0x1e, 0x68, 0x7e, 0xc5, 0xee, 0x6c, 0x7a, 0xdf, 0xce, - 0x76, 0xe9, 0x60, 0xf4, 0xc9, 0x3b, 0xdc, 0x9f, 0x69, 0xf8, 0x6f, 0xbc, 0x71, 0xb8, 0xd3, 0xdd, - 0x89, 0xf4, 0xce, 0xb7, 0xfb, 0x7a, 0xe7, 0x89, 0x11, 0x28, 0xfe, 0xdd, 0xf1, 0xfa, 0x76, 0xbc, - 0x5f, 0x53, 0x50, 0x88, 0x8f, 0xdb, 0x6b, 0xd8, 0xf9, 0x1e, 0x47, 0x77, 0xbe, 0xf3, 0x07, 0xca, - 0xaf, 0x98, 0x1d, 0xf0, 0x46, 0x52, 0x5a, 0x05, 0x2b, 0xdb, 0x08, 0x63, 0xfc, 0xdb, 0x74, 0x52, - 0x94, 0xc4, 0x72, 0xb9, 0xcf, 0xef, 0x8d, 0x88, 0xf5, 0x75, 0x93, 0x0f, 0x97, 0x16, 0x9f, 0x0f, - 0x5e, 0x2e, 0xea, 0x30, 0xd9, 0xf4, 0x86, 0xb0, 0xac, 0xe2, 0x77, 0xf6, 0x9b, 0x7f, 0x49, 0xe3, - 0xda, 0x1b, 0xf5, 0x12, 0x86, 0x7d, 0x66, 0xf4, 0x31, 0x4c, 0x10, 0xf1, 0xab, 0x7a, 0x84, 0x52, - 0xde, 0xef, 0xe7, 0xb7, 0x0a, 0x3c, 0xed, 0x3c, 0x14, 0x96, 0xb4, 0xca, 0x57, 0x29, 0x58, 0xdd, - 0xaf, 0x07, 0x20, 0x3a, 0x64, 0x4f, 0x3b, 0xd8, 0xce, 0x3d, 0xfa, 0xde, 0xf6, 0x5d, 0x0a, 0x0e, - 0x0f, 0xdb, 0x89, 0x78, 0x41, 0xf1, 0x45, 0x28, 0xd8, 0x62, 0x82, 0x82, 0xba, 0x27, 0xa4, 0x58, - 0x6a, 0xd1, 0x1b, 0x30, 0xd5, 0xd0, 0xcc, 0x5a, 0xc5, 0xf8, 0xdc, 0x5f, 0xc5, 0x83, 0x94, 0xbe, - 0x29, 0xe5, 0x38, 0x40, 0xa0, 0x6b, 0xb0, 0x20, 0xec, 0xd6, 0x89, 0x59, 0x77, 0x1a, 0xe2, 0x1d, - 0xe4, 0xb6, 0x11, 0xcc, 0x95, 0x7b, 0x7d, 0x7a, 0x3c, 0x60, 0xa1, 0xfc, 0x95, 0x02, 0x74, 0x90, - 0x05, 0xe1, 0x34, 0xe4, 0x34, 0xdb, 0x10, 0x7b, 0xaa, 0x57, 0x54, 0x39, 0x75, 0xb6, 0xdb, 0x29, - 0xe6, 0xae, 0x6c, 0xdd, 0xf2, 0x84, 0x38, 0xd4, 0x73, 0xb0, 0x3f, 0x45, 0xbd, 0x69, 0x29, 0xc1, - 0xbe, 0x63, 0x86, 0x43, 0x3d, 0xba, 0x00, 0x33, 0x7a, 0xd3, 0x65, 0x0e, 0xa1, 0x15, 0xdd, 0xb2, - 0x89, 0x68, 0x42, 0x53, 0xea, 0x61, 0x79, 0xa7, 0x99, 0xab, 0x3d, 0x3a, 0x1c, 0x41, 0xa2, 0x12, - 0x00, 0xaf, 0x23, 0x66, 0x6b, 0xdc, 0x4f, 0x56, 0xf8, 0x99, 0xe3, 0x0f, 0xb6, 0x19, 0x48, 0x71, - 0x0f, 0x42, 0x79, 0x02, 0x4b, 0x15, 0x42, 0xdb, 0x86, 0x4e, 0xae, 0xe8, 0xba, 0xe5, 0x9a, 0x8e, - 0xbf, 0x71, 0x97, 0x21, 0x17, 
0xc0, 0x64, 0xa9, 0x1d, 0x92, 0xfe, 0x73, 0x01, 0x17, 0x0e, 0x31, - 0x41, 0x6d, 0xa7, 0x63, 0x6b, 0xfb, 0xfb, 0x34, 0x4c, 0x86, 0xf4, 0x99, 0x1d, 0xc3, 0xac, 0x49, - 0xe6, 0xa3, 0x3e, 0xfa, 0x8e, 0x61, 0xd6, 0x5e, 0x75, 0x8a, 0xd3, 0x12, 0xc6, 0x3f, 0xb1, 0x00, - 0xa2, 0x6b, 0x90, 0x71, 0x19, 0xa1, 0xb2, 0x6a, 0x8f, 0x27, 0xe4, 0xf1, 0x03, 0x46, 0xa8, 0xbf, - 0x32, 0x4d, 0x71, 0x52, 0x2e, 0xc0, 0xc2, 0x1a, 0xdd, 0x84, 0x6c, 0x9d, 0xbf, 0x87, 0x2c, 0xcc, - 0x13, 0x09, 0x34, 0xbd, 0xbf, 0x3f, 0xbc, 0xc7, 0x17, 0x12, 0xec, 0x11, 0xa0, 0x26, 0xcc, 0xb1, - 0x48, 0xe0, 0xc4, 0x23, 0x25, 0xaf, 0x40, 0x43, 0x23, 0xad, 0xa2, 0x6e, 0xa7, 0x38, 0x17, 0x55, - 0xe1, 0x3e, 0x6e, 0xa5, 0x0c, 0xd3, 0x3d, 0xd7, 0xda, 0xbf, 0x8f, 0xaa, 0x97, 0x9f, 0xbd, 0x2c, - 0x8c, 0x3d, 0x7f, 0x59, 0x18, 0x7b, 0xf1, 0xb2, 0x30, 0xf6, 0x65, 0xb7, 0x90, 0x7a, 0xd6, 0x2d, - 0xa4, 0x9e, 0x77, 0x0b, 0xa9, 0x17, 0xdd, 0x42, 0xea, 0xb7, 0x6e, 0x21, 0xf5, 0xf4, 0xf7, 0xc2, - 0xd8, 0xe3, 0x23, 0xb1, 0xff, 0x13, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x0a, 0x3e, 0x83, - 0x48, 0x15, 0x00, 0x00, + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto", fileDescriptor_45ba024d525b289b) +} + +var fileDescriptor_45ba024d525b289b = []byte{ + // 1621 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4d, 0x6f, 0xdb, 0x46, + 0x1a, 0x36, 0x65, 0xc9, 0xb6, 0xc6, 0x9f, 0x19, 0xc7, 0xb0, 0xd6, 0x59, 0x48, 0x0e, 0x17, 0xd8, + 0x64, 0x37, 0x09, 0x15, 0x67, 0x93, 0x6c, 0x16, 0xc1, 0x22, 0x30, 0x93, 0x6c, 0xbe, 0x6c, 0xc7, + 0x1e, 0x27, 0xd9, 0x36, 0x48, 0x81, 0xd0, 0xd4, 0x58, 0x9a, 0x58, 0x22, 0xd9, 0x19, 0x52, 0x8e, + 0x8b, 0x1c, 0x0a, 0xf4, 0x0f, 0xf4, 0x07, 0xe4, 0xd8, 0x43, 0x6f, 0x05, 0x7a, 0xed, 0xa5, 0xc7, + 0xa0, 0xe8, 0x21, 0xc7, 0x9c, 0x84, 0x58, 0xbd, 0xf6, 0x07, 0xb4, 0x39, 0x14, 0xc5, 0x0c, 0x87, + 0xa4, 0x28, 0x89, 0xa2, 0x52, 0x03, 0x39, 0xf5, 0x66, 0xbe, 0x1f, 0xcf, 0x3b, 0xf3, 0xce, 0xfb, + 0xf1, 0xc8, 0xe0, 0xf6, 0xde, 0x15, 0xa6, 0x11, 0xbb, 0xbc, 0xe7, 0xed, 0x60, 0x6a, 0x61, 0x17, + 0xb3, 0x72, 0x13, 0x5b, 0x15, 0x9b, 0x96, 0xa5, 0xc2, 0x70, 0x48, 0x79, 0xb7, 0x6e, 0xef, 0x9b, + 0xb6, 0xe5, 0x52, 0xbb, 0x5e, 0x6e, 0xae, 0x18, 0x75, 0xa7, 0x66, 0xac, 0x94, 0xab, 0xd8, 0xc2, + 0xd4, 0x70, 0x71, 0x45, 0x73, 0xa8, 0xed, 0xda, 0xb0, 0xe4, 0x3b, 0x68, 0x86, 0x43, 0xb4, 0x0e, + 0x07, 0x2d, 0x70, 0x58, 0x3a, 0x57, 0x25, 0x6e, 0xcd, 0xdb, 0xd1, 0x4c, 0xbb, 0x51, 0xae, 0xda, + 0x55, 0xbb, 0x2c, 0xfc, 0x76, 0xbc, 0x5d, 0xf1, 0x25, 0x3e, 0xc4, 0x5f, 0x3e, 0xde, 0xd2, 0xc5, + 0xe8, 0x00, 0x0d, 0xc3, 0xac, 0x11, 0x0b, 0xd3, 0x83, 0xb2, 0xb3, 0x57, 0xe5, 0x02, 0x56, 0x6e, + 0x60, 0xd7, 0x28, 0x37, 0x7b, 0x4e, 0xb1, 0x54, 0x4e, 0xf2, 0xa2, 0x9e, 0xe5, 0x92, 0x06, 0xee, + 0x71, 0xb8, 0x9c, 0xe6, 0xc0, 0xcc, 0x1a, 0x6e, 0x18, 0xdd, 0x7e, 0xea, 0x77, 0x0a, 0x58, 0xbe, + 0xf9, 0x1c, 0x37, 0x1c, 0x77, 0x93, 0x12, 0x9b, 0x12, 0xf7, 0x60, 0x0d, 0x37, 0x71, 0xfd, 0xba, + 0x6d, 0xed, 0x92, 0xaa, 0x47, 0x0d, 0x97, 0xd8, 0x16, 0xfc, 0x08, 0x14, 0x2c, 0xbb, 0x41, 0x2c, + 0x83, 0xcb, 0x4d, 0x8f, 0x52, 0x6c, 0x99, 0x07, 0xdb, 0x35, 0x83, 0x62, 0x56, 0x50, 0x96, 0x95, + 0xd3, 0x39, 0xfd, 0xaf, 0xed, 0x56, 0xa9, 0xb0, 0x91, 0x60, 0x83, 0x12, 0xbd, 0xe1, 0x7f, 0xc1, + 0x6c, 0x1d, 0x5b, 0x15, 0x63, 0xa7, 0x8e, 0x37, 0x31, 0x35, 0xb1, 0xe5, 0x16, 0x32, 0x02, 0x70, + 0xbe, 0xdd, 0x2a, 0xcd, 0xae, 0xc5, 0x55, 0xa8, 0xdb, 0x56, 0x7d, 0x0c, 0x16, 0xff, 0x57, 0xb7, + 0xf7, 0x6f, 0x10, 0xe6, 0x12, 0xab, 0xea, 0x11, 0x56, 0xc3, 0x74, 0x1d, 0xbb, 0x35, 0xbb, 
0x02, + 0xaf, 0x81, 0xac, 0x7b, 0xe0, 0x60, 0x71, 0xbe, 0xbc, 0x7e, 0xe6, 0x55, 0xab, 0x34, 0xd2, 0x6e, + 0x95, 0xb2, 0x0f, 0x0e, 0x1c, 0xfc, 0xae, 0x55, 0x3a, 0x91, 0xe0, 0xc6, 0xd5, 0x48, 0x38, 0xaa, + 0x2f, 0x33, 0x00, 0x70, 0xab, 0x6d, 0x91, 0x38, 0xf8, 0x14, 0x4c, 0xf0, 0xc7, 0xaa, 0x18, 0xae, + 0x21, 0x30, 0x27, 0x2f, 0x9c, 0xd7, 0xa2, 0x52, 0x09, 0x73, 0xae, 0x39, 0x7b, 0x55, 0x2e, 0x60, + 0x1a, 0xb7, 0xd6, 0x9a, 0x2b, 0xda, 0xfd, 0x9d, 0x67, 0xd8, 0x74, 0xd7, 0xb1, 0x6b, 0xe8, 0x50, + 0x9e, 0x02, 0x44, 0x32, 0x14, 0xa2, 0xc2, 0x2d, 0x90, 0x65, 0x0e, 0x36, 0x45, 0x02, 0x26, 0x2f, + 0x94, 0xb5, 0x94, 0x42, 0xd4, 0xa2, 0xc3, 0x6d, 0x3b, 0xd8, 0xd4, 0xa7, 0x82, 0x2b, 0xf2, 0x2f, + 0x24, 0xa0, 0xe0, 0xc7, 0x60, 0x8c, 0xb9, 0x86, 0xeb, 0xb1, 0xc2, 0xa8, 0x00, 0x5d, 0x79, 0x1f, + 0x50, 0xe1, 0xa8, 0xcf, 0x48, 0xd8, 0x31, 0xff, 0x1b, 0x49, 0x40, 0xf5, 0x4d, 0x06, 0xcc, 0x47, + 0xc6, 0xd7, 0x6d, 0xab, 0x42, 0x44, 0xad, 0x5c, 0x8d, 0xe5, 0xfd, 0x54, 0x57, 0xde, 0x17, 0xfb, + 0xb8, 0x44, 0x39, 0x87, 0xff, 0x09, 0xcf, 0x9b, 0x11, 0xee, 0x27, 0xe3, 0xc1, 0xdf, 0xb5, 0x4a, + 0xb3, 0xa1, 0x5b, 0xfc, 0x3c, 0xb0, 0x09, 0x60, 0xdd, 0x60, 0xee, 0x03, 0x6a, 0x58, 0xcc, 0x87, + 0x25, 0x0d, 0x2c, 0xaf, 0xfd, 0xcf, 0xe1, 0x5e, 0x8a, 0x7b, 0xe8, 0x4b, 0x32, 0x24, 0x5c, 0xeb, + 0x41, 0x43, 0x7d, 0x22, 0xc0, 0xbf, 0x83, 0x31, 0x8a, 0x0d, 0x66, 0x5b, 0x85, 0xac, 0x38, 0x72, + 0x98, 0x2f, 0x24, 0xa4, 0x48, 0x6a, 0xe1, 0x3f, 0xc0, 0x78, 0x03, 0x33, 0x66, 0x54, 0x71, 0x21, + 0x27, 0x0c, 0x67, 0xa5, 0xe1, 0xf8, 0xba, 0x2f, 0x46, 0x81, 0x5e, 0xfd, 0x5e, 0x01, 0x33, 0x51, + 0x9e, 0xd6, 0x08, 0x73, 0xe1, 0x93, 0x9e, 0xea, 0xd3, 0x86, 0xbb, 0x13, 0xf7, 0x16, 0xb5, 0x37, + 0x27, 0xc3, 0x4d, 0x04, 0x92, 0x8e, 0xca, 0xdb, 0x04, 0x39, 0xe2, 0xe2, 0x06, 0xcf, 0xfa, 0xe8, + 0xe9, 0xc9, 0x0b, 0x67, 0xde, 0xa3, 0x4a, 0xf4, 0x69, 0x89, 0x9b, 0xbb, 0xc3, 0x11, 0x90, 0x0f, + 0xa4, 0xfe, 0x3c, 0xda, 0x79, 0x05, 0x5e, 0x91, 0xf0, 0x6b, 0x05, 0x2c, 0x39, 0x89, 0x33, 0x46, + 0xde, 0xea, 0x46, 0x6a, 0xe8, 0xe4, 0x31, 0x85, 0xf0, 0x2e, 0xe6, 0xb3, 0x05, 0xeb, 0xaa, 0x3c, + 0xd3, 0xd2, 0x00, 0xe3, 0x01, 0x67, 0x81, 0x77, 0x01, 0x6c, 0x18, 0x2e, 0xcf, 0x69, 0x75, 0x93, + 0x62, 0x13, 0x57, 0x38, 0xaa, 0x1c, 0x4c, 0x61, 0x7d, 0xac, 0xf7, 0x58, 0xa0, 0x3e, 0x5e, 0xf0, + 0x0b, 0x05, 0xcc, 0x57, 0x7a, 0x07, 0x8d, 0xac, 0xcc, 0x2b, 0x43, 0xa5, 0xba, 0xcf, 0xa0, 0xd2, + 0x17, 0xdb, 0xad, 0xd2, 0x7c, 0x1f, 0x05, 0xea, 0x17, 0x0d, 0x7e, 0x02, 0x72, 0xd4, 0xab, 0x63, + 0x56, 0xc8, 0x8a, 0x17, 0x4e, 0x0f, 0xbb, 0x69, 0xd7, 0x89, 0x79, 0x80, 0xb8, 0xcf, 0xff, 0x89, + 0x5b, 0xdb, 0xf6, 0xc4, 0xc4, 0x62, 0xd1, 0x73, 0x0b, 0x15, 0xf2, 0x51, 0xd5, 0x17, 0x60, 0xae, + 0x7b, 0x70, 0xc0, 0x1a, 0x00, 0x66, 0xd0, 0xab, 0x7c, 0x4d, 0xf0, 0xb8, 0x17, 0xdf, 0xa3, 0xb2, + 0xc2, 0x46, 0x8f, 0xc6, 0x66, 0x28, 0x62, 0xa8, 0x03, 0x5b, 0x3d, 0x0f, 0xa6, 0x6e, 0x51, 0xdb, + 0x73, 0xe4, 0x21, 0xe1, 0x32, 0xc8, 0x5a, 0x46, 0x23, 0x18, 0x41, 0xe1, 0x5c, 0xdc, 0x30, 0x1a, + 0x18, 0x09, 0x8d, 0xfa, 0x95, 0x02, 0xa6, 0xd7, 0x48, 0x83, 0xb8, 0x08, 0x33, 0xc7, 0xb6, 0x18, + 0x86, 0x97, 0x62, 0x63, 0xeb, 0x64, 0xd7, 0xd8, 0x3a, 0x16, 0x33, 0xee, 0x18, 0x58, 0x4f, 0xc0, + 0xf8, 0xa7, 0x1e, 0xf6, 0x88, 0x55, 0x95, 0x63, 0xfb, 0x52, 0xea, 0x0d, 0xb7, 0x7c, 0xfb, 0x58, + 0xc5, 0xe9, 0x93, 0x7c, 0x10, 0x48, 0x0d, 0x0a, 0x20, 0xd5, 0xdf, 0x32, 0xe0, 0xa4, 0x88, 0x8c, + 0x2b, 0x03, 0xb6, 0xf3, 0x13, 0x50, 0x30, 0x18, 0xf3, 0x28, 0xae, 0x24, 0x6d, 0xe7, 0x65, 0x79, + 0x9d, 0xc2, 0x6a, 0x82, 0x1d, 0x4a, 0x44, 0x80, 0x7b, 0x60, 0xba, 0xde, 0x79, 0x79, 0x79, 0x4f, + 0x2d, 0xf5, 0x9e, 
0xb1, 0x94, 0xe9, 0x0b, 0xf2, 0x08, 0xf1, 0xb4, 0xa3, 0x38, 0x76, 0x3f, 0x3a, + 0x30, 0x3a, 0x3c, 0x1d, 0x80, 0xf7, 0xc1, 0xc2, 0x8e, 0x4d, 0xa9, 0xbd, 0x4f, 0xac, 0xaa, 0x88, + 0x13, 0x80, 0x64, 0x05, 0xc8, 0x5f, 0xda, 0xad, 0xd2, 0x82, 0xde, 0xcf, 0x00, 0xf5, 0xf7, 0x53, + 0xf7, 0xc1, 0xc2, 0x06, 0x1f, 0x2c, 0xcc, 0xf6, 0xa8, 0x89, 0xa3, 0x9e, 0x80, 0x25, 0x90, 0x6b, + 0x62, 0xba, 0xe3, 0xd7, 0x75, 0x5e, 0xcf, 0xf3, 0x8e, 0x78, 0xc4, 0x05, 0xc8, 0x97, 0xf3, 0x9b, + 0x58, 0x91, 0xe7, 0x43, 0xb4, 0xc6, 0x0a, 0x63, 0xc2, 0x54, 0xdc, 0x64, 0x23, 0xae, 0x42, 0xdd, + 0xb6, 0xea, 0x61, 0x06, 0x2c, 0x26, 0xb4, 0x20, 0x7c, 0x04, 0x26, 0x98, 0xfc, 0x5b, 0xb6, 0xd5, + 0xe9, 0xd4, 0xc7, 0x90, 0xce, 0xd1, 0x16, 0x08, 0xd0, 0x50, 0x88, 0x05, 0x1d, 0x30, 0x4d, 0xe5, + 0x19, 0x44, 0x50, 0xb9, 0x0d, 0xfe, 0x95, 0x0a, 0xde, 0x9b, 0x9f, 0xe8, 0xb9, 0x51, 0x27, 0x22, + 0x8a, 0x07, 0x80, 0x2f, 0xc0, 0x5c, 0xc7, 0xc5, 0xfd, 0xa0, 0xa3, 0x22, 0xe8, 0xe5, 0xd4, 0xa0, + 0x7d, 0xdf, 0x45, 0x2f, 0xc8, 0xb8, 0x73, 0x1b, 0x5d, 0xb8, 0xa8, 0x27, 0x92, 0xfa, 0x63, 0x06, + 0x0c, 0x58, 0x10, 0x1f, 0x80, 0xf0, 0x19, 0x31, 0xc2, 0x77, 0xed, 0x08, 0xab, 0x2f, 0x91, 0x00, + 0x92, 0x2e, 0x02, 0xb8, 0x7a, 0x94, 0x20, 0x83, 0x09, 0xe1, 0x2f, 0x19, 0xf0, 0xb7, 0x64, 0xe7, + 0x88, 0x20, 0xde, 0x8b, 0x4d, 0xda, 0x7f, 0x77, 0x4d, 0xda, 0x53, 0x43, 0x40, 0xfc, 0x49, 0x18, + 0xbb, 0x08, 0xe3, 0x5b, 0x05, 0x14, 0x93, 0xf3, 0xf6, 0x01, 0x08, 0xe4, 0xd3, 0x38, 0x81, 0xbc, + 0x7a, 0x84, 0x2a, 0x4b, 0x20, 0x94, 0xb7, 0x06, 0x15, 0x57, 0xc8, 0xfc, 0x86, 0x58, 0xfd, 0xdf, + 0x64, 0x06, 0xe5, 0x4a, 0x30, 0xd5, 0x94, 0x9f, 0x30, 0x31, 0xef, 0x9b, 0x16, 0x5f, 0x40, 0x0d, + 0xbe, 0x43, 0xfc, 0x8a, 0x24, 0x60, 0xbc, 0xee, 0xaf, 0x6c, 0xd9, 0xd7, 0xfa, 0x70, 0x9b, 0x72, + 0xd0, 0x8a, 0xf7, 0xe9, 0x81, 0x34, 0x43, 0x01, 0x3e, 0xc4, 0x60, 0x0c, 0x8b, 0x9f, 0xee, 0x43, + 0x37, 0x77, 0xda, 0x2f, 0x7d, 0x1d, 0xf0, 0x42, 0xf4, 0xad, 0x90, 0x04, 0x57, 0x5f, 0x2a, 0x60, + 0x39, 0x6d, 0x2a, 0xc0, 0xe7, 0x7d, 0xd8, 0xde, 0x51, 0xc8, 0xfc, 0xf0, 0xec, 0xef, 0x5b, 0x05, + 0x1c, 0xef, 0xc7, 0xa9, 0x78, 0xa3, 0x71, 0x22, 0x15, 0xb2, 0xa0, 0xb0, 0xd1, 0xb6, 0x84, 0x14, + 0x49, 0x2d, 0x3c, 0x0b, 0x26, 0x6a, 0x86, 0x55, 0xd9, 0x26, 0x9f, 0x05, 0x1c, 0x3f, 0x2c, 0xf5, + 0xdb, 0x52, 0x8e, 0x42, 0x0b, 0x78, 0x03, 0xcc, 0x09, 0xbf, 0x35, 0x6c, 0x55, 0xdd, 0x9a, 0x78, + 0x13, 0xc9, 0x51, 0xc2, 0xdd, 0xb3, 0xd5, 0xa5, 0x47, 0x3d, 0x1e, 0xea, 0xaf, 0x0a, 0x80, 0x7f, + 0x84, 0x56, 0x9c, 0x01, 0x79, 0xc3, 0x21, 0x82, 0xed, 0xfa, 0xcd, 0x96, 0xd7, 0xa7, 0xdb, 0xad, + 0x52, 0x7e, 0x75, 0xf3, 0x8e, 0x2f, 0x44, 0x91, 0x9e, 0x1b, 0x07, 0xfb, 0xd6, 0xdf, 0xab, 0xd2, + 0x38, 0x08, 0xcc, 0x50, 0xa4, 0x87, 0x57, 0xc0, 0x94, 0x59, 0xf7, 0x98, 0x8b, 0xe9, 0xb6, 0x69, + 0x3b, 0x58, 0x0c, 0xa7, 0x09, 0xfd, 0xb8, 0xbc, 0xd3, 0xd4, 0xf5, 0x0e, 0x1d, 0x8a, 0x59, 0x42, + 0x0d, 0x00, 0xde, 0x59, 0xcc, 0x31, 0x78, 0x9c, 0x9c, 0x88, 0x33, 0xc3, 0x1f, 0x6c, 0x23, 0x94, + 0xa2, 0x0e, 0x0b, 0xf5, 0x19, 0x58, 0xd8, 0xc6, 0xb4, 0x49, 0x4c, 0xbc, 0x6a, 0x9a, 0xb6, 0x67, + 0xb9, 0x01, 0x6f, 0x2f, 0x83, 0x7c, 0x68, 0x26, 0x9b, 0xef, 0x98, 0x8c, 0x9f, 0x0f, 0xb1, 0x50, + 0x64, 0x13, 0x76, 0x7b, 0x26, 0xb1, 0xdb, 0x7f, 0xc8, 0x80, 0xf1, 0x08, 0x3e, 0xbb, 0x47, 0xac, + 0x8a, 0x44, 0x3e, 0x11, 0x58, 0xdf, 0x23, 0x56, 0xe5, 0x5d, 0xab, 0x34, 0x29, 0xcd, 0xf8, 0x27, + 0x12, 0x86, 0xf0, 0x2e, 0xc8, 0x7a, 0x0c, 0x53, 0xd9, 0xc7, 0x67, 0x53, 0xab, 0xf9, 0x21, 0xc3, + 0x34, 0x20, 0x5a, 0x13, 0x1c, 0x9a, 0x0b, 0x90, 0xc0, 0x80, 0x1b, 0x20, 0x57, 0xe5, 0xaf, 0x22, + 0x5b, 0xf5, 0x5c, 0x2a, 0x58, 0xe7, 0x2f, 
0x1a, 0xbf, 0x10, 0x84, 0x04, 0xf9, 0x30, 0x90, 0x82, + 0x19, 0x16, 0x4b, 0xa2, 0x78, 0xb0, 0x61, 0x88, 0x53, 0xdf, 0xdc, 0xeb, 0xb0, 0xdd, 0x2a, 0xcd, + 0xc4, 0x55, 0xa8, 0x2b, 0x82, 0x5a, 0x06, 0x93, 0x1d, 0x57, 0x4c, 0x9f, 0xb5, 0xfa, 0xcd, 0x57, + 0x87, 0xc5, 0x91, 0xd7, 0x87, 0xc5, 0x91, 0x37, 0x87, 0xc5, 0x91, 0xcf, 0xdb, 0x45, 0xe5, 0x55, + 0xbb, 0xa8, 0xbc, 0x6e, 0x17, 0x95, 0x37, 0xed, 0xa2, 0xf2, 0xb6, 0x5d, 0x54, 0xbe, 0xfc, 0xa9, + 0x38, 0xf2, 0xb8, 0x94, 0xf2, 0x2f, 0xda, 0xdf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x6c, 0x4e, + 0x4e, 0xdd, 0x15, 0x00, 0x00, } func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) { @@ -1242,11 +1244,9 @@ func (m *LimitedPriorityLevelConfiguration) MarshalToSizedBuffer(dAtA []byte) (i } i-- dAtA[i] = 0x12 - if m.NominalConcurrencyShares != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.NominalConcurrencyShares)) - i-- - dAtA[i] = 0x8 - } + i = encodeVarintGenerated(dAtA, i, uint64(m.AssuredConcurrencyShares)) + i-- + dAtA[i] = 0x8 return len(dAtA) - i, nil } @@ -2007,9 +2007,7 @@ func (m *LimitedPriorityLevelConfiguration) Size() (n int) { } var l int _ = l - if m.NominalConcurrencyShares != nil { - n += 1 + sovGenerated(uint64(*m.NominalConcurrencyShares)) - } + n += 1 + sovGenerated(uint64(m.AssuredConcurrencyShares)) l = m.LimitResponse.Size() n += 1 + l + sovGenerated(uint64(l)) if m.LendablePercent != nil { @@ -2386,7 +2384,7 @@ func (this *LimitedPriorityLevelConfiguration) String() string { return "nil" } s := strings.Join([]string{`&LimitedPriorityLevelConfiguration{`, - `NominalConcurrencyShares:` + valueToStringGenerated(this.NominalConcurrencyShares) + `,`, + `AssuredConcurrencyShares:` + fmt.Sprintf("%v", this.AssuredConcurrencyShares) + `,`, `LimitResponse:` + strings.Replace(strings.Replace(this.LimitResponse.String(), "LimitResponse", "LimitResponse", 1), `&`, ``, 1) + `,`, `LendablePercent:` + valueToStringGenerated(this.LendablePercent) + `,`, `BorrowingLimitPercent:` + valueToStringGenerated(this.BorrowingLimitPercent) + `,`, @@ -3715,9 +3713,9 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NominalConcurrencyShares", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AssuredConcurrencyShares", wireType) } - var v int32 + m.AssuredConcurrencyShares = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3727,12 +3725,11 @@ func (m *LimitedPriorityLevelConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + m.AssuredConcurrencyShares |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.NominalConcurrencyShares = &v case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LimitResponse", wireType) diff --git a/vendor/k8s.io/api/flowcontrol/v1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto similarity index 94% rename from vendor/k8s.io/api/flowcontrol/v1/generated.proto rename to vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto index a5c6f4fc4f3..6509386f26f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto @@ -19,14 +19,14 @@ limitations under the License. 
syntax = "proto2"; -package k8s.io.api.flowcontrol.v1; +package k8s.io.api.flowcontrol.v1alpha1; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "k8s.io/api/flowcontrol/v1"; +option go_package = "k8s.io/api/flowcontrol/v1alpha1"; // ExemptPriorityLevelConfiguration describes the configurable aspects // of the handling of exempt requests. @@ -153,8 +153,6 @@ message FlowSchemaStatus { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge // +optional repeated FlowSchemaCondition conditions = 1; } @@ -192,28 +190,23 @@ message LimitResponse { // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? message LimitedPriorityLevelConfiguration { - // `nominalConcurrencyShares` (NCS) contributes to the computation of the - // NominalConcurrencyLimit (NominalCL) of this level. - // This is the number of execution seats available at this priority level. - // This is used both for requests dispatched from this priority level - // as well as requests dispatched from other priority levels - // borrowing seats from this level. - // The server's concurrency limit (ServerCL) is divided among the - // Limited priority levels in proportion to their NCS values: - // - // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) - // sum_ncs = sum[priority level k] NCS(k) + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: // - // Bigger numbers mean a larger nominal concurrency limit, - // at the expense of every other priority level. - // - // If not specified, this field defaults to a value of 30. - // - // Setting this field to zero supports the construction of a - // "jail" for this priority level that is used to hold some request(s) + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. // +optional - optional int32 nominalConcurrencyShares = 1; + optional int32 assuredConcurrencyShares = 1; // `limitResponse` indicates what to do with requests that can not be executed right now optional LimitResponse limitResponse = 2; @@ -388,8 +381,6 @@ message PriorityLevelConfigurationStatus { // `conditions` is the current state of "request-priority". 
// +listType=map // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge // +optional repeated PriorityLevelConfigurationCondition conditions = 1; } diff --git a/vendor/k8s.io/api/flowcontrol/v1/register.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1/register.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/register.go index 02725b514e0..0c8a9cc5657 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/register.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +26,7 @@ import ( const GroupName = "flowcontrol.apiserver.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { diff --git a/vendor/k8s.io/api/flowcontrol/v1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go similarity index 88% rename from vendor/k8s.io/api/flowcontrol/v1/types.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/types.go index e62d23280e5..161411ff338 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Kubernetes Authors. +Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -57,55 +57,13 @@ const ( ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID" ) -const ( - // AutoUpdateAnnotationKey is the name of an annotation that enables - // automatic update of the spec of the bootstrap configuration - // object(s), if set to 'true'. - // - // On a fresh install, all bootstrap configuration objects will have auto - // update enabled with the following annotation key: - // apf.kubernetes.io/autoupdate-spec: 'true' - // - // The kube-apiserver periodically checks the bootstrap configuration - // objects on the cluster and applies updates if necessary. - // - // kube-apiserver enforces an 'always auto-update' policy for the - // mandatory configuration object(s). This implies: - // - the auto-update annotation key is added with a value of 'true' - // if it is missing. - // - the auto-update annotation key is set to 'true' if its current value - // is a boolean false or has an invalid boolean representation - // (if the cluster operator sets it to 'false' it will be stomped) - // - any changes to the spec made by the cluster operator will be - // stomped, except for changes to the `nominalConcurrencyShares` - // and `lendablePercent` fields of the PriorityLevelConfiguration - // named "exempt". 
- // - // The kube-apiserver will apply updates on the suggested configuration if: - // - the cluster operator has enabled auto-update by setting the annotation - // (apf.kubernetes.io/autoupdate-spec: 'true') or - // - the annotation key is missing but the generation is 1 - // - // If the suggested configuration object is missing the annotation key, - // kube-apiserver will update the annotation appropriately: - // - it is set to 'true' if generation of the object is '1' which usually - // indicates that the spec of the object has not been changed. - // - it is set to 'false' if generation of the object is greater than 1. - // - // The goal is to enable the kube-apiserver to apply update on suggested - // configuration objects installed by previous releases but not overwrite - // changes made by the cluster operators. - // Note that this distinction is imperfectly detected: in the case where an - // operator deletes a suggested configuration object and later creates it - // but with a variant spec and then does no updates of the object - // (generation is 1), the technique outlined above will incorrectly - // determine that the object should be auto-updated. - AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" -) - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -126,6 +84,10 @@ type FlowSchema struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. type FlowSchemaList struct { @@ -352,10 +314,8 @@ type FlowSchemaStatus struct { // `conditions` is a list of the current states of FlowSchema. // +listType=map // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge // +optional - Conditions []FlowSchemaCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + Conditions []FlowSchemaCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` } // FlowSchemaCondition describes conditions for a FlowSchema. @@ -381,6 +341,10 @@ type FlowSchemaConditionType string // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. 
type PriorityLevelConfiguration struct { @@ -400,6 +364,10 @@ type PriorityLevelConfiguration struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.18 +// +k8s:prerelease-lifecycle-gen:deprecated=1.20 +// +k8s:prerelease-lifecycle-gen:removed=1.21 +// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1beta3,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { @@ -458,28 +426,23 @@ const ( // - How are requests for this priority level limited? // - What should be done with requests that exceed the limit? type LimitedPriorityLevelConfiguration struct { - // `nominalConcurrencyShares` (NCS) contributes to the computation of the - // NominalConcurrencyLimit (NominalCL) of this level. - // This is the number of execution seats available at this priority level. - // This is used both for requests dispatched from this priority level - // as well as requests dispatched from other priority levels - // borrowing seats from this level. - // The server's concurrency limit (ServerCL) is divided among the - // Limited priority levels in proportion to their NCS values: - // - // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) - // sum_ncs = sum[priority level k] NCS(k) - // - // Bigger numbers mean a larger nominal concurrency limit, - // at the expense of every other priority level. - // - // If not specified, this field defaults to a value of 30. + // `assuredConcurrencyShares` (ACS) configures the execution + // limit, which is a limit on the number of requests of this + // priority level that may be exeucting at a given time. ACS must + // be a positive number. The server's concurrency limit (SCL) is + // divided among the concurrency-controlled priority levels in + // proportion to their assured concurrency shares. This produces + // the assured concurrency value (ACV) --- the number of requests + // that may be executing at a time --- for each such priority + // level: // - // Setting this field to zero supports the construction of a - // "jail" for this priority level that is used to hold some request(s) + // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) ) // + // bigger numbers of ACS mean more reserved concurrent requests (at the + // expense of every other PL). + // This field has a default value of 30. // +optional - NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares" protobuf:"varint,1,opt,name=nominalConcurrencyShares"` + AssuredConcurrencyShares int32 `json:"assuredConcurrencyShares" protobuf:"varint,1,opt,name=assuredConcurrencyShares"` // `limitResponse` indicates what to do with requests that can not be executed right now LimitResponse LimitResponse `json:"limitResponse,omitempty" protobuf:"bytes,2,opt,name=limitResponse"` @@ -623,10 +586,8 @@ type PriorityLevelConfigurationStatus struct { // `conditions` is the current state of "request-priority". // +listType=map // +listMapKey=type - // +patchMergeKey=type - // +patchStrategy=merge // +optional - Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + Conditions []PriorityLevelConfigurationCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` } // PriorityLevelConfigurationCondition defines the condition of priority level. 
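The `assuredConcurrencyShares` comment restored above (and the `nominalConcurrencyShares` text it replaces) both describe the same proportional split of the apiserver's total concurrency across Limited priority levels. As a standalone illustration of that arithmetic only — this snippet is not part of the vendored patch, and the helper name `concurrencyValues`, the `scl` argument, and the example share values are invented for the sketch:

package main

import "fmt"

// concurrencyValues applies ACV(l) = ceil( SCL * ACS(l) / sum_k ACS(k) ),
// the formula quoted in the LimitedPriorityLevelConfiguration doc comment,
// to split a server concurrency limit across priority levels.
func concurrencyValues(scl int32, shares map[string]int32) map[string]int32 {
	var sum int32
	for _, s := range shares {
		sum += s
	}
	out := make(map[string]int32, len(shares))
	if sum == 0 {
		return out // no shares configured; nothing to divide
	}
	for name, s := range shares {
		out[name] = (scl*s + sum - 1) / sum // integer ceil(scl*s/sum)
	}
	return out
}

func main() {
	// With SCL=600 and the shares below, the levels get 400, 160 and 40 seats.
	fmt.Println(concurrencyValues(600, map[string]int32{
		"workload-low":    100,
		"workload-high":   40,
		"leader-election": 10,
	}))
}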
diff --git a/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go similarity index 95% rename from vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go index b8cb436367a..1d0680c1085 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. Please read this PR for more @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "nominalConcurrencyShares": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go similarity index 99% rename from vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go index f37090b75b4..a5c9737aa5f 100644 --- a/vendor/k8s.io/api/flowcontrol/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.deepcopy.go @@ -19,7 +19,7 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( runtime "k8s.io/apimachinery/pkg/runtime" @@ -237,11 +237,6 @@ func (in *LimitResponse) DeepCopy() *LimitResponse { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LimitedPriorityLevelConfiguration) DeepCopyInto(out *LimitedPriorityLevelConfiguration) { *out = *in - if in.NominalConcurrencyShares != nil { - in, out := &in.NominalConcurrencyShares, &out.NominalConcurrencyShares - *out = new(int32) - **out = **in - } in.LimitResponse.DeepCopyInto(&out.LimitResponse) if in.LendablePercent != nil { in, out := &in.LendablePercent, &out.LendablePercent diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 00000000000..2b6a3d3fd96 --- /dev/null +++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,122 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchema"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *FlowSchemaList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "FlowSchemaList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfiguration) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. 
+func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfiguration"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleIntroduced() (major, minor int) { + return 1, 18 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor int) { + return 1, 20 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta3", Kind: "PriorityLevelConfigurationList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { + return 1, 21 +} diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto index 04b54820c73..96df0ace798 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be executing at a given time. ACS must + // priority level that may be exeucting at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. 
This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go index abc3e420096..9e05ff1a090 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be executing at a given time. ACS must + // priority level that may be exeucting at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go index d69bdac6228..1405f3c3ca6 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. 
When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto index a832114afea..a8c8a327374 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/generated.proto @@ -192,7 +192,7 @@ message LimitResponse { message LimitedPriorityLevelConfiguration { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be executing at a given time. ACS must + // priority level that may be exeucting at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go index c66cb173f4a..e8cf7abfff6 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types.go @@ -466,7 +466,7 @@ const ( type LimitedPriorityLevelConfiguration struct { // `assuredConcurrencyShares` (ACS) configures the execution // limit, which is a limit on the number of requests of this - // priority level that may be executing at a given time. ACS must + // priority level that may be exeucting at a given time. ACS must // be a positive number. The server's concurrency limit (SCL) is // divided among the concurrency-controlled priority levels in // proportion to their assured concurrency shares. This produces diff --git a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go index 921122731af..49a41780966 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta2/types_swagger_doc_generated.go @@ -122,7 +122,7 @@ func (LimitResponse) SwaggerDoc() map[string]string { var map_LimitedPriorityLevelConfiguration = map[string]string{ "": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", - "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be executing at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", + "assuredConcurrencyShares": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. 
The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) ", "limitResponse": "`limitResponse` indicates what to do with requests that can not be executed right now", "lendablePercent": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", "borrowingLimitPercent": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go index 0ffc22a2365..810941557b2 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/types.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/types.go @@ -103,25 +103,10 @@ const ( AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec" ) -const ( - // This annotation is only for use in v1beta3. - // - // The presence of this annotation in a v1beta3 object means that - // a zero value in the 'NominalConcurrencyShares' field means zero - // rather than the old default of 30. - // - // To set a zero value for the 'NominalConcurrencyShares' field in v1beta3, - // set the annotation to an empty string: - // "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares": "" - // - PriorityLevelPreserveZeroConcurrencySharesKey = "flowcontrol.k8s.io/v1beta3-preserve-zero-concurrency-shares" -) - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchema // FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with // similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher". @@ -143,7 +128,6 @@ type FlowSchema struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,FlowSchemaList // FlowSchemaList is a list of FlowSchema objects. 
type FlowSchemaList struct { @@ -400,7 +384,6 @@ type FlowSchemaConditionType string // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfiguration // PriorityLevelConfiguration represents the configuration of a priority level. type PriorityLevelConfiguration struct { @@ -421,7 +404,6 @@ type PriorityLevelConfiguration struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:prerelease-lifecycle-gen:introduced=1.26 -// +k8s:prerelease-lifecycle-gen:replacement=flowcontrol.apiserver.k8s.io,v1,PriorityLevelConfigurationList // PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects. type PriorityLevelConfigurationList struct { diff --git a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go index 7e46a1469db..24b76138500 100644 --- a/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.prerelease-lifecycle.go @@ -21,10 +21,6 @@ limitations under the License. package v1beta3 -import ( - schema "k8s.io/apimachinery/pkg/runtime/schema" -) - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. func (in *FlowSchema) APILifecycleIntroduced() (major, minor int) { @@ -37,12 +33,6 @@ func (in *FlowSchema) APILifecycleDeprecated() (major, minor int) { return 1, 29 } -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *FlowSchema) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchema"} -} - // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *FlowSchema) APILifecycleRemoved() (major, minor int) { @@ -61,12 +51,6 @@ func (in *FlowSchemaList) APILifecycleDeprecated() (major, minor int) { return 1, 29 } -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *FlowSchemaList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "FlowSchemaList"} -} - // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
func (in *FlowSchemaList) APILifecycleRemoved() (major, minor int) { @@ -85,12 +69,6 @@ func (in *PriorityLevelConfiguration) APILifecycleDeprecated() (major, minor int return 1, 29 } -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PriorityLevelConfiguration) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfiguration"} -} - // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfiguration) APILifecycleRemoved() (major, minor int) { @@ -109,12 +87,6 @@ func (in *PriorityLevelConfigurationList) APILifecycleDeprecated() (major, minor return 1, 29 } -// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. -// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. -func (in *PriorityLevelConfigurationList) APILifecycleReplacement() schema.GroupVersionKind { - return schema.GroupVersionKind{Group: "flowcontrol.apiserver.k8s.io", Version: "v1", Kind: "PriorityLevelConfigurationList"} -} - // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. func (in *PriorityLevelConfigurationList) APILifecycleRemoved() (major, minor int) { diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go index 949ea513fe5..f54d1f82421 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -25,12 +25,14 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v11 "k8s.io/api/core/v1" math "math" math_bits "math/bits" reflect "reflect" strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" ) // Reference imports to suppress errors if they are not otherwise used. @@ -44,15 +46,15 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *IPAddress) Reset() { *m = IPAddress{} } -func (*IPAddress) ProtoMessage() {} -func (*IPAddress) Descriptor() ([]byte, []int) { +func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } +func (*ClusterCIDR) ProtoMessage() {} +func (*ClusterCIDR) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{0} } -func (m *IPAddress) XXX_Unmarshal(b []byte) error { +func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -60,27 +62,27 @@ func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { } return b[:n], nil } -func (m *IPAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddress.Merge(m, src) +func (m *ClusterCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDR.Merge(m, src) } -func (m *IPAddress) XXX_Size() int { +func (m *ClusterCIDR) XXX_Size() int { return m.Size() } -func (m *IPAddress) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddress.DiscardUnknown(m) +func (m *ClusterCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) } -var xxx_messageInfo_IPAddress proto.InternalMessageInfo +var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo -func (m *IPAddressList) Reset() { *m = IPAddressList{} } -func (*IPAddressList) ProtoMessage() {} -func (*IPAddressList) Descriptor() ([]byte, []int) { +func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } +func (*ClusterCIDRList) ProtoMessage() {} +func (*ClusterCIDRList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{1} } -func (m *IPAddressList) XXX_Unmarshal(b []byte) error { +func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -88,27 +90,27 @@ func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressList.Merge(m, src) +func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRList.Merge(m, src) } -func (m *IPAddressList) XXX_Size() int { +func (m *ClusterCIDRList) XXX_Size() int { return m.Size() } -func (m *IPAddressList) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressList.DiscardUnknown(m) +func (m *ClusterCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressList proto.InternalMessageInfo +var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo -func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } -func (*IPAddressSpec) ProtoMessage() {} -func (*IPAddressSpec) Descriptor() ([]byte, []int) { +func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } +func (*ClusterCIDRSpec) ProtoMessage() {} +func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{2} } -func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { +func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IPAddressSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { +func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -116,27 +118,27 @@ func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error } return b[:n], nil } -func (m *IPAddressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPAddressSpec.Merge(m, src) +func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) } -func (m *IPAddressSpec) XXX_Size() int { +func (m *ClusterCIDRSpec) XXX_Size() int { return m.Size() } -func (m *IPAddressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) +func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) } -var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo +var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo -func (m *ParentReference) Reset() { *m = ParentReference{} } -func (*ParentReference) ProtoMessage() {} -func (*ParentReference) Descriptor() ([]byte, []int) { +func (m *IPAddress) Reset() { *m = IPAddress{} } +func (*IPAddress) ProtoMessage() {} +func (*IPAddress) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{3} } -func (m *ParentReference) XXX_Unmarshal(b []byte) error { +func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -144,27 +146,27 @@ func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ParentReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParentReference.Merge(m, src) +func (m *IPAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddress.Merge(m, src) } -func (m *ParentReference) XXX_Size() int { +func (m *IPAddress) XXX_Size() int { return m.Size() } -func (m *ParentReference) XXX_DiscardUnknown() { - xxx_messageInfo_ParentReference.DiscardUnknown(m) +func (m *IPAddress) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddress.DiscardUnknown(m) } -var xxx_messageInfo_ParentReference proto.InternalMessageInfo +var xxx_messageInfo_IPAddress proto.InternalMessageInfo -func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} } -func (*ServiceCIDR) ProtoMessage() {} -func (*ServiceCIDR) Descriptor() ([]byte, []int) { +func (m *IPAddressList) Reset() { *m = IPAddressList{} } +func (*IPAddressList) ProtoMessage() {} +func (*IPAddressList) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{4} } -func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error { +func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -172,27 +174,27 @@ func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) } return b[:n], nil } -func (m *ServiceCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDR.Merge(m, src) +func (m *IPAddressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressList.Merge(m, src) } -func (m *ServiceCIDR) XXX_Size() int { +func (m *IPAddressList) XXX_Size() int { return 
m.Size() } -func (m *ServiceCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDR.DiscardUnknown(m) +func (m *IPAddressList) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressList.DiscardUnknown(m) } -var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo +var xxx_messageInfo_IPAddressList proto.InternalMessageInfo -func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} } -func (*ServiceCIDRList) ProtoMessage() {} -func (*ServiceCIDRList) Descriptor() ([]byte, []int) { +func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } +func (*IPAddressSpec) ProtoMessage() {} +func (*IPAddressSpec) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{5} } -func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error { +func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -200,55 +202,27 @@ func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *ServiceCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRList.Merge(m, src) +func (m *IPAddressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPAddressSpec.Merge(m, src) } -func (m *ServiceCIDRList) XXX_Size() int { +func (m *IPAddressSpec) XXX_Size() int { return m.Size() } -func (m *ServiceCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m) +func (m *IPAddressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IPAddressSpec.DiscardUnknown(m) } -var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo +var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo -func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} } -func (*ServiceCIDRSpec) ProtoMessage() {} -func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) { +func (m *ParentReference) Reset() { *m = ParentReference{} } +func (*ParentReference) ProtoMessage() {} +func (*ParentReference) Descriptor() ([]byte, []int) { return fileDescriptor_c1b7ac8d7d97acec, []int{6} } -func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRSpec.Merge(m, src) -} -func (m *ServiceCIDRSpec) XXX_Size() int { - return m.Size() -} -func (m *ServiceCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo - -func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} } -func (*ServiceCIDRStatus) ProtoMessage() {} -func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{7} -} -func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error { +func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -256,27 +230,26 @@ func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
e } return b[:n], nil } -func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceCIDRStatus.Merge(m, src) +func (m *ParentReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParentReference.Merge(m, src) } -func (m *ServiceCIDRStatus) XXX_Size() int { +func (m *ParentReference) XXX_Size() int { return m.Size() } -func (m *ServiceCIDRStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m) +func (m *ParentReference) XXX_DiscardUnknown() { + xxx_messageInfo_ParentReference.DiscardUnknown(m) } -var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo +var xxx_messageInfo_ParentReference proto.InternalMessageInfo func init() { + proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") + proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") + proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference") - proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR") - proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList") - proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec") - proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus") } func init() { @@ -284,51 +257,54 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 648 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0x5f, 0xbf, 0x52, 0xaf, 0xa2, 0x2e, 0x9c, 0x28, 0x6c, 0x8a, - 0xa0, 0x33, 0x24, 0x42, 0x88, 0x2d, 0x6e, 0xa5, 0xaa, 0x12, 0xb4, 0x65, 0xba, 0x02, 0x75, 0xc1, - 0xc4, 0xbe, 0x75, 0x4c, 0xf0, 0x8f, 0x66, 0xc6, 0x01, 0x76, 0x3c, 0x02, 0x2f, 0xc0, 0x73, 0xb0, - 0x02, 0x89, 0x5d, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xf3, 0x22, 0x68, 0xc6, 0x8e, 0x9d, 0x34, 0xea, - 0xdf, 0xa6, 0x3b, 0xcf, 0xb9, 0xe7, 0x9c, 0xb9, 0xe7, 0xce, 0x8c, 0x8c, 0x76, 0x46, 0x2f, 0x04, - 0x0e, 0x62, 0x32, 0x4a, 0x07, 0xc0, 0x23, 0x90, 0x20, 0xc8, 0x18, 0x22, 0x2f, 0xe6, 0xa4, 0x28, - 0xb0, 0x24, 0x20, 0x11, 0xc8, 0x4f, 0x31, 0x1f, 0x05, 0x91, 0x4f, 0xc6, 0x3d, 0xf6, 0x31, 0x19, - 0xb2, 0x1e, 0xf1, 0x21, 0x02, 0xce, 0x24, 0x78, 0x38, 0xe1, 0xb1, 0x8c, 0x2d, 0x3b, 0xe7, 0x63, - 0x96, 0x04, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0xd3, 0x0f, 0xe4, 0x30, 0x1d, 0x60, 0x37, 0x0e, - 0x89, 0x1f, 0xfb, 0x31, 0xd1, 0xb2, 0x41, 0x7a, 0xac, 0x57, 0x7a, 0xa1, 0xbf, 0x72, 0xbb, 0xf5, - 0x67, 0xd5, 0xf6, 0x21, 0x73, 0x87, 0x41, 0x04, 0xfc, 0x0b, 0x49, 0x46, 0xbe, 0x02, 0x04, 0x09, - 0x41, 0x32, 0x32, 0x9e, 0x6b, 0x62, 0x9d, 0x5c, 0xa5, 0xe2, 0x69, 0x24, 0x83, 0x10, 0xe6, 0x04, - 0xcf, 0x6f, 0x12, 0x08, 0x77, 0x08, 0x21, 0xbb, 0xac, 0xeb, 0xfe, 0x32, 0x90, 0xb9, 0x7b, 0xf0, - 0xd2, 0xf3, 0x38, 0x08, 0x61, 0xbd, 0x47, 0xcb, 0xaa, 0x23, 0x8f, 0x49, 0xd6, 0x32, 0x3a, 0xc6, - 0x46, 0xb3, 0xff, 0x14, 0x57, 0xe3, 0x28, 0x8d, 0x71, 0x32, 0xf2, 0x15, 0x20, 0xb0, 0x62, 0xe3, - 0x71, 0x0f, 0xef, 0x0f, 0x3e, 0x80, 0x2b, 0x5f, 0x83, 0x64, 0x8e, 0x75, 0x72, 0xde, 0xae, 0x65, - 0xe7, 0x6d, 0x54, 
0x61, 0xb4, 0x74, 0xb5, 0xf6, 0x51, 0x5d, 0x24, 0xe0, 0xb6, 0x16, 0xb4, 0xfb, - 0x26, 0xbe, 0x7e, 0xd8, 0xb8, 0x6c, 0xed, 0x30, 0x01, 0xd7, 0xf9, 0xaf, 0xb0, 0xae, 0xab, 0x15, - 0xd5, 0x46, 0xdd, 0x9f, 0x06, 0x5a, 0x29, 0x59, 0xaf, 0x02, 0x21, 0xad, 0xa3, 0xb9, 0x10, 0xf8, - 0x76, 0x21, 0x94, 0x5a, 0x47, 0x78, 0x50, 0xec, 0xb3, 0x3c, 0x41, 0xa6, 0x02, 0xec, 0xa1, 0x46, - 0x20, 0x21, 0x14, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xff, 0xe8, 0xd6, 0x09, 0x9c, 0x95, 0xc2, - 0xb5, 0xb1, 0xab, 0xf4, 0x34, 0xb7, 0xe9, 0x86, 0x53, 0xed, 0xab, 0x58, 0xd6, 0x11, 0x32, 0x13, - 0xc6, 0x21, 0x92, 0x14, 0x8e, 0x8b, 0xfe, 0xc9, 0x4d, 0x9b, 0x1c, 0x4c, 0x04, 0xc0, 0x21, 0x72, - 0xc1, 0x59, 0xc9, 0xce, 0xdb, 0x66, 0x09, 0xd2, 0xca, 0xb0, 0xfb, 0xc3, 0x40, 0xab, 0x97, 0xd8, - 0xd6, 0x43, 0xd4, 0xf0, 0x79, 0x9c, 0x26, 0x7a, 0x37, 0xb3, 0xea, 0x73, 0x47, 0x81, 0x34, 0xaf, - 0x59, 0x4f, 0xd0, 0x32, 0x07, 0x11, 0xa7, 0xdc, 0x05, 0x7d, 0x78, 0x66, 0x35, 0x25, 0x5a, 0xe0, - 0xb4, 0x64, 0x58, 0x04, 0x99, 0x11, 0x0b, 0x41, 0x24, 0xcc, 0x85, 0xd6, 0xa2, 0xa6, 0xaf, 0x15, - 0x74, 0x73, 0x6f, 0x52, 0xa0, 0x15, 0xc7, 0xea, 0xa0, 0xba, 0x5a, 0xb4, 0xea, 0x9a, 0x5b, 0x1e, - 0xb4, 0xe2, 0x52, 0x5d, 0xe9, 0x7e, 0x5f, 0x40, 0xcd, 0x43, 0xe0, 0xe3, 0xc0, 0x85, 0xad, 0xdd, - 0x6d, 0x7a, 0x0f, 0x77, 0xf5, 0xcd, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, 0xbb, 0xea, 0xb6, - 0x5a, 0x6f, 0xd1, 0x92, 0x90, 0x4c, 0xa6, 0x42, 0x0f, 0xa5, 0xd9, 0xef, 0xdd, 0xc5, 0x54, 0x0b, - 0x9d, 0xff, 0x0b, 0xdb, 0xa5, 0x7c, 0x4d, 0x0b, 0xc3, 0xee, 0x6f, 0x03, 0xad, 0x4e, 0xb1, 0xef, - 0xe1, 0x29, 0x1c, 0xcc, 0x3e, 0x85, 0xc7, 0x77, 0xc8, 0x72, 0xc5, 0x63, 0xe8, 0xcf, 0x44, 0xd0, - 0xcf, 0xa1, 0x8d, 0x1a, 0x6e, 0xe0, 0x71, 0xd1, 0x32, 0x3a, 0x8b, 0x1b, 0xa6, 0x63, 0x2a, 0x8d, - 0x2a, 0x0a, 0x9a, 0xe3, 0xdd, 0xcf, 0x68, 0x6d, 0x6e, 0x48, 0x96, 0x8b, 0x90, 0x1b, 0x47, 0x5e, - 0x20, 0x83, 0x38, 0xca, 0xa5, 0xb3, 0x07, 0x78, 0x4d, 0xf4, 0xad, 0x89, 0xae, 0xba, 0x1d, 0x25, - 0x24, 0xe8, 0x94, 0xad, 0xb3, 0x7d, 0x72, 0x61, 0xd7, 0x4e, 0x2f, 0xec, 0xda, 0xd9, 0x85, 0x5d, - 0xfb, 0x9a, 0xd9, 0xc6, 0x49, 0x66, 0x1b, 0xa7, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x27, 0xb3, - 0x8d, 0x6f, 0x7f, 0xed, 0xda, 0x3b, 0xfb, 0xfa, 0xff, 0xcf, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x29, 0x82, 0x11, 0x57, 0xb9, 0x06, 0x00, 0x00, -} - -func (m *IPAddress) Marshal() (dAtA []byte, err error) { + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, + 0x14, 0xc6, 0x63, 0x92, 0x48, 0x78, 0x00, 0x85, 0xeb, 0xcd, 0x8d, 0x58, 0x38, 0xb9, 0xb9, 0x1b, + 0xae, 0x6e, 0x19, 0x03, 0x42, 0x51, 0xb7, 0x98, 0x48, 0x34, 0x52, 0x0b, 0xe9, 0x20, 0xba, 0xa8, + 0x58, 0xd4, 0xb1, 0x0f, 0x8e, 0x1b, 0xfc, 0x47, 0x33, 0xe3, 0x54, 0xec, 0xfa, 0x08, 0x7d, 0xa1, + 0x56, 0x6a, 0x57, 0x2c, 0x59, 0xb2, 0x8a, 0x8a, 0xfb, 0x02, 0x5d, 0xb7, 0x9b, 0x6a, 0x26, 0x4e, + 0xec, 0x24, 0x0d, 0xd0, 0x0d, 0xbb, 0xcc, 0x39, 0xbf, 0xf3, 0xcd, 0x39, 0x73, 0xbe, 0x24, 0xe8, + 0xb0, 0xff, 0x94, 0x61, 0x2f, 0x34, 0xfa, 0x71, 0x17, 0x68, 0x00, 0x1c, 0x98, 0x31, 0x80, 0xc0, + 0x09, 0xa9, 0x91, 0x26, 0xac, 0xc8, 0x33, 0x02, 0xe0, 0xef, 0x42, 0xda, 0xf7, 0x02, 0xd7, 0x18, + 0xec, 0x58, 0x17, 0x51, 0xcf, 0xda, 0x31, 0x5c, 0x08, 0x80, 0x5a, 0x1c, 0x1c, 0x1c, 0xd1, 0x90, + 0x87, 0x9a, 0x3e, 0xe2, 0xb1, 0x15, 0x79, 0x38, 0xe3, 0xf1, 0x98, 0xdf, 0xd8, 0x72, 0x3d, 0xde, + 0x8b, 0xbb, 0xd8, 0x0e, 0x7d, 0xc3, 0x0d, 0xdd, 0xd0, 0x90, 0x65, 0xdd, 0xf8, 0x5c, 0x9e, 0xe4, + 0x41, 0x7e, 0x1a, 0xc9, 0x6d, 0x34, 0x72, 0xd7, 0xdb, 0x21, 0x05, 0x63, 0x30, 
0x77, 0xe5, 0xc6, + 0x5e, 0xc6, 0xf8, 0x96, 0xdd, 0xf3, 0x02, 0xa0, 0x97, 0x46, 0xd4, 0x77, 0x45, 0x80, 0x19, 0x3e, + 0x70, 0xeb, 0x77, 0x55, 0xc6, 0xa2, 0x2a, 0x1a, 0x07, 0xdc, 0xf3, 0x61, 0xae, 0xa0, 0x79, 0x5f, + 0x01, 0xb3, 0x7b, 0xe0, 0x5b, 0xb3, 0x75, 0x8d, 0x2f, 0x0a, 0x5a, 0x39, 0xb8, 0x88, 0x19, 0x07, + 0x7a, 0xd0, 0x6e, 0x11, 0xed, 0x0d, 0x5a, 0x16, 0x3d, 0x39, 0x16, 0xb7, 0xaa, 0x4a, 0x5d, 0xd9, + 0x5c, 0xd9, 0xdd, 0xc6, 0xd9, 0xa3, 0x4d, 0xa4, 0x71, 0xd4, 0x77, 0x45, 0x80, 0x61, 0x41, 0xe3, + 0xc1, 0x0e, 0x3e, 0xee, 0xbe, 0x05, 0x9b, 0xbf, 0x00, 0x6e, 0x99, 0xda, 0xd5, 0xb0, 0x56, 0x48, + 0x86, 0x35, 0x94, 0xc5, 0xc8, 0x44, 0x55, 0x7b, 0x89, 0x4a, 0x2c, 0x02, 0xbb, 0xba, 0x24, 0xd5, + 0x0d, 0x7c, 0xf7, 0x4a, 0x70, 0xae, 0xb9, 0x93, 0x08, 0x6c, 0x73, 0x35, 0x15, 0x2f, 0x89, 0x13, + 0x91, 0x52, 0x8d, 0xcf, 0x0a, 0xaa, 0xe4, 0xb8, 0xe7, 0x1e, 0xe3, 0xda, 0xd9, 0xdc, 0x20, 0xf8, + 0x61, 0x83, 0x88, 0x6a, 0x39, 0xc6, 0x7a, 0x7a, 0xd3, 0xf2, 0x38, 0x92, 0x1b, 0xa2, 0x83, 0xca, + 0x1e, 0x07, 0x9f, 0x55, 0x97, 0xea, 0xc5, 0xcd, 0x95, 0xdd, 0xff, 0xff, 0x60, 0x0a, 0x73, 0x2d, + 0xd5, 0x2d, 0xb7, 0x85, 0x02, 0x19, 0x09, 0x35, 0xbe, 0x4f, 0xcf, 0x20, 0xa6, 0xd3, 0x5e, 0xa1, + 0xd5, 0x20, 0x74, 0xe0, 0x04, 0x2e, 0xc0, 0xe6, 0x21, 0x4d, 0xe7, 0xa8, 0xe7, 0x2f, 0x13, 0xb6, + 0x13, 0x5d, 0x1f, 0xe5, 0x38, 0x73, 0x3d, 0x19, 0xd6, 0x56, 0xf3, 0x11, 0x32, 0xa5, 0xa3, 0xed, + 0xa3, 0x4a, 0x04, 0x54, 0x00, 0xcf, 0x42, 0xc6, 0x4d, 0x8f, 0x33, 0xb9, 0x8d, 0xb2, 0xf9, 0x77, + 0xda, 0x5a, 0xa5, 0x33, 0x9d, 0x26, 0xb3, 0xbc, 0x56, 0x47, 0x25, 0x2f, 0x1a, 0xec, 0x55, 0x8b, + 0x75, 0x65, 0x53, 0xcd, 0x96, 0xd2, 0xee, 0x0c, 0xf6, 0x88, 0xcc, 0xa4, 0x44, 0xb3, 0x5a, 0x9a, + 0x23, 0x9a, 0x92, 0x68, 0x36, 0x3e, 0x29, 0x48, 0x6d, 0x77, 0xf6, 0x1d, 0x87, 0x02, 0x63, 0x8f, + 0xe0, 0xbc, 0xe3, 0x29, 0xe7, 0x6d, 0xdd, 0xb7, 0xb3, 0x49, 0x6b, 0x0b, 0x7d, 0xf7, 0x51, 0x41, + 0x6b, 0x13, 0xea, 0x11, 0x5c, 0x77, 0x34, 0xed, 0xba, 0xff, 0x1e, 0x3c, 0xc1, 0x02, 0xcf, 0xf9, + 0xb9, 0xf6, 0xa5, 0xe1, 0xce, 0x90, 0x1a, 0x59, 0x14, 0x02, 0x4e, 0xe0, 0x3c, 0xed, 0xff, 0xde, + 0x2f, 0x68, 0x67, 0x5c, 0x00, 0x14, 0x02, 0x1b, 0xcc, 0xb5, 0x64, 0x58, 0x53, 0x27, 0x41, 0x92, + 0x09, 0x36, 0x7e, 0x2a, 0xa8, 0x32, 0x43, 0x6b, 0xff, 0xa2, 0xb2, 0x4b, 0xc3, 0x38, 0x92, 0xb7, + 0xa9, 0x59, 0x9f, 0x87, 0x22, 0x48, 0x46, 0x39, 0xed, 0x09, 0x5a, 0xa6, 0xc0, 0xc2, 0x98, 0xda, + 0x20, 0x97, 0xa7, 0x66, 0xaf, 0x44, 0xd2, 0x38, 0x99, 0x10, 0x9a, 0x81, 0xd4, 0xc0, 0xf2, 0x81, + 0x45, 0x96, 0x0d, 0xa9, 0x3f, 0xff, 0x4a, 0x71, 0xf5, 0x68, 0x9c, 0x20, 0x19, 0x23, 0x9c, 0x2a, + 0x0e, 0xb3, 0x4e, 0x15, 0x2c, 0x91, 0x19, 0xcd, 0x44, 0xc5, 0xd8, 0x73, 0xaa, 0x65, 0x09, 0x6c, + 0xa7, 0x40, 0xf1, 0xb4, 0xdd, 0xfa, 0x31, 0xac, 0xfd, 0xb3, 0xe8, 0x97, 0x97, 0x5f, 0x46, 0xc0, + 0xf0, 0x69, 0xbb, 0x45, 0x44, 0xb1, 0xd9, 0xba, 0xba, 0xd5, 0x0b, 0xd7, 0xb7, 0x7a, 0xe1, 0xe6, + 0x56, 0x2f, 0xbc, 0x4f, 0x74, 0xe5, 0x2a, 0xd1, 0x95, 0xeb, 0x44, 0x57, 0x6e, 0x12, 0x5d, 0xf9, + 0x9a, 0xe8, 0xca, 0x87, 0x6f, 0x7a, 0xe1, 0xb5, 0x7e, 0xf7, 0x3f, 0xda, 0xaf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xf9, 0x9d, 0x9e, 0xc6, 0x0b, 0x07, 0x00, 0x00, +} + +func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -338,12 +314,12 @@ func (m *IPAddress) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) 
} -func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -371,7 +347,7 @@ func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressList) Marshal() (dAtA []byte, err error) { +func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -381,12 +357,12 @@ func (m *IPAddressList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -418,7 +394,7 @@ func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { +func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -428,19 +404,32 @@ func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ParentRef != nil { + i -= len(m.IPv6) + copy(dAtA[i:], m.IPv6) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) + i-- + dAtA[i] = 0x22 + i -= len(m.IPv4) + copy(dAtA[i:], m.IPv4) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) + i-- + dAtA[i] = 0x10 + if m.NodeSelector != nil { { - size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -453,50 +442,7 @@ func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ParentReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x22 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceCIDR) Marshal() (dAtA 
[]byte, err error) { +func (m *IPAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -506,26 +452,16 @@ func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a { size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -549,7 +485,7 @@ func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { +func (m *IPAddressList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -559,12 +495,12 @@ func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -596,7 +532,7 @@ func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { +func (m *IPAddressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -606,29 +542,32 @@ func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *IPAddressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.CIDRs) > 0 { - for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.CIDRs[iNdEx]) - copy(dAtA[i:], m.CIDRs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.ParentRef != nil { + { + size, err := m.ParentRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { +func (m *ParentReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -638,30 +577,41 @@ func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ServiceCIDRStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -676,7 +626,7 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *IPAddress) Size() (n int) { +func (m *ClusterCIDR) Size() (n int) { if m == nil { return 0 } @@ -689,7 +639,7 @@ func (m *IPAddress) Size() (n int) { return n } -func (m *IPAddressList) Size() (n int) { +func (m *ClusterCIDRList) Size() (n int) { if m == nil { return 0 } @@ -706,37 +656,25 @@ func (m *IPAddressList) Size() (n int) { return n } -func (m *IPAddressSpec) Size() (n int) { +func (m *ClusterCIDRSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.ParentRef != nil { - l = m.ParentRef.Size() + if m.NodeSelector != nil { + l = m.NodeSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *ParentReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Group) + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) + l = len(m.IPv6) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ServiceCIDR) Size() (n int) { +func (m *IPAddress) Size() (n int) { if m == nil { return 0 } @@ -746,12 +684,10 @@ func (m *ServiceCIDR) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = m.Spec.Size() n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ServiceCIDRList) Size() (n int) { +func (m *IPAddressList) Size() (n int) { if m == nil { return 0 } @@ -768,33 +704,35 @@ func (m *ServiceCIDRList) Size() (n int) { return n } -func (m *ServiceCIDRSpec) Size() (n int) { +func (m *IPAddressSpec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.CIDRs) > 0 { - for _, s := range m.CIDRs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.ParentRef != nil { + l = m.ParentRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ServiceCIDRStatus) Size() (n int) { +func (m *ParentReference) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -804,105 +742,93 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *IPAddress) String() string { +func (this *ClusterCIDR) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IPAddress{`, + s := strings.Join([]string{`&ClusterCIDR{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *IPAddressList) String() string { +func (this *ClusterCIDRList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]IPAddress{" + repeatedStringForItems := "[]ClusterCIDR{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&IPAddressList{`, + s := strings.Join([]string{`&ClusterCIDRList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *IPAddressSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPAddressSpec{`, - `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ParentReference) String() string { +func (this *ClusterCIDRSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ParentReference{`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, `}`, }, "") return s } -func (this *ServiceCIDR) String() string { +func (this *IPAddress) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServiceCIDR{`, + s := strings.Join([]string{`&IPAddress{`, `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IPAddressSpec", "IPAddressSpec", 1), `&`, ``, 1) + `,`, `}`, }, 
"") return s } -func (this *ServiceCIDRList) String() string { +func (this *IPAddressList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ServiceCIDR{" + repeatedStringForItems := "[]IPAddress{" for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + "," + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IPAddress", "IPAddress", 1), `&`, ``, 1) + "," } repeatedStringForItems += "}" - s := strings.Join([]string{`&ServiceCIDRList{`, + s := strings.Join([]string{`&IPAddressList{`, `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s } -func (this *ServiceCIDRSpec) String() string { +func (this *IPAddressSpec) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&ServiceCIDRSpec{`, - `CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`, + s := strings.Join([]string{`&IPAddressSpec{`, + `ParentRef:` + strings.Replace(this.ParentRef.String(), "ParentReference", "ParentReference", 1) + `,`, `}`, }, "") return s } -func (this *ServiceCIDRStatus) String() string { +func (this *ParentReference) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ServiceCIDRStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, + s := strings.Join([]string{`&ParentReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s @@ -915,7 +841,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *IPAddress) Unmarshal(dAtA []byte) error { +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -938,10 +864,10 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1031,7 +957,7 @@ func (m *IPAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *IPAddressList) Unmarshal(dAtA []byte) error { +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1054,10 +980,10 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { case 1: @@ -1090,96 +1016,12 @@ func (m *IPAddressList) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, IPAddress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1206,10 +1048,8 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ParentRef == nil { - m.ParentRef = &ParentReference{} - } - if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1234,7 +1074,7 @@ func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ParentReference) Unmarshal(dAtA []byte) error { +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1257,17 +1097,17 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1277,29 +1117,33 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) } - var stringLen uint64 + m.PerNodeHostBits = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1309,27 +1153,14 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.PerNodeHostBits |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1357,11 +1188,11 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.IPv4 = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1389,7 +1220,7 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.IPv6 = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1412,7 +1243,7 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { +func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1435,10 +1266,10 @@ func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddress: illegal tag %d (wire type %d)", fieldNum, wire) } 
switch fieldNum { case 1: @@ -1507,39 +1338,6 @@ func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1561,7 +1359,7 @@ func (m *ServiceCIDR) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { +func (m *IPAddressList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1584,10 +1382,10 @@ func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1652,7 +1450,7 @@ func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ServiceCIDR{}) + m.Items = append(m.Items, IPAddress{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1678,7 +1476,7 @@ func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { +func (m *IPAddressSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1701,17 +1499,17 @@ func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group") + return fmt.Errorf("proto: IPAddressSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: IPAddressSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ParentRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1721,23 +1519,27 @@ func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return 
io.ErrUnexpectedEOF } - m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex])) + if m.ParentRef == nil { + m.ParentRef = &ParentReference{} + } + if err := m.ParentRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1760,7 +1562,7 @@ func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { +func (m *ParentReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1783,17 +1585,17 @@ func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ParentReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ParentReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1803,25 +1605,151 @@ func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/k8s.io/api/networking/v1alpha1/generated.proto b/vendor/k8s.io/api/networking/v1alpha1/generated.proto index fb7971745d4..0f1f30d7011 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/networking/v1alpha1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.networking.v1alpha1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -28,6 +29,69 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/networking/v1alpha1"; +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +message ClusterCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ClusterCIDRSpec spec = 2; +} + +// ClusterCIDRList contains a list of ClusterCIDR. +message ClusterCIDRList { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of ClusterCIDRs. + repeated ClusterCIDR items = 2; +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +message ClusterCIDRSpec { + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. + // This field is immutable. + // +optional + optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; + + // perNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. + // +required + optional int32 perNodeHostBits = 2; + + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + optional string ipv4 = 3; + + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + optional string ipv6 = 4; +} + // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, @@ -83,56 +147,9 @@ message ParentReference { // Name is the name of the object being referenced. // +required optional string name = 4; -} - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -message ServiceCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRSpec spec = 2; - - // status represents the current state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ServiceCIDRStatus status = 3; -} - -// ServiceCIDRList contains a list of ServiceCIDR objects. -message ServiceCIDRList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ServiceCIDRs. - repeated ServiceCIDR items = 2; -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -message ServiceCIDRSpec { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. 
Max of two CIDRs is allowed, one of each IP family. - // This field is immutable. - // +optional - repeated string cidrs = 1; -} -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -message ServiceCIDRStatus { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state + // UID is the uid of the object being referenced. // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1; + optional string uid = 5; } diff --git a/vendor/k8s.io/api/networking/v1alpha1/register.go b/vendor/k8s.io/api/networking/v1alpha1/register.go index c8f5856b5dc..8dda6394d47 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/register.go +++ b/vendor/k8s.io/api/networking/v1alpha1/register.go @@ -52,10 +52,10 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterCIDR{}, + &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, - &ServiceCIDR{}, - &ServiceCIDRList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/networking/v1alpha1/types.go b/vendor/k8s.io/api/networking/v1alpha1/types.go index 9d56ca193e6..52e4a11e8b1 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types.go @@ -17,9 +17,86 @@ limitations under the License. package v1alpha1 import ( + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +type ClusterCIDR struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +type ClusterCIDRSpec struct { + // nodeSelector defines which nodes the config is applicable to. + // An empty or nil nodeSelector selects all nodes. + // This field is immutable. + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // perNodeHostBits defines the number of host bits to be configured per node. 
+ // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. + // +required + PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` + + // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` + + // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). + // At least one of ipv4 and ipv6 must be specified. + // This field is immutable. + // +optional + IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDRList contains a list of ClusterCIDR. +type ClusterCIDRList struct { + metav1.TypeMeta `json:",inline"` + + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of ClusterCIDRs. + Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,6 +143,9 @@ type ParentReference struct { // Name is the name of the object being referenced. // +required Name string `json:"name,omitempty" protobuf:"bytes,4,opt,name=name"` + // UID is the uid of the object being referenced. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -81,70 +161,3 @@ type IPAddressList struct { // items is the list of IPAddresses. Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). -// This range is used to allocate ClusterIPs to Service objects. -type ServiceCIDR struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // spec is the desired state of the ServiceCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // status represents the current state of the ServiceCIDR. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services. -type ServiceCIDRSpec struct { - // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64") - // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. - // This field is immutable. - // +optional - CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"` -} - -const ( - // ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the - // apiserver to allocate ClusterIPs for Services. - ServiceCIDRConditionReady = "Ready" - // ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is - // being deleted. - ServiceCIDRReasonTerminating = "Terminating" -) - -// ServiceCIDRStatus describes the current state of the ServiceCIDR. -type ServiceCIDRStatus struct { - // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. - // Current service state - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.27 - -// ServiceCIDRList contains a list of ServiceCIDR objects. -type ServiceCIDRList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // items is the list of ServiceCIDRs. - Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 481ec06030b..85304784f4e 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -27,6 +27,38 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterCIDR = map[string]string{ + "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the ClusterCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ClusterCIDR) SwaggerDoc() map[string]string { + return map_ClusterCIDR +} + +var map_ClusterCIDRList = map[string]string{ + "": "ClusterCIDRList contains a list of ClusterCIDR.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of ClusterCIDRs.", +} + +func (ClusterCIDRList) SwaggerDoc() map[string]string { + return map_ClusterCIDRList +} + +var map_ClusterCIDRSpec = map[string]string{ + "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", + "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", +} + +func (ClusterCIDRSpec) SwaggerDoc() map[string]string { + return map_ClusterCIDRSpec +} + var map_IPAddress = map[string]string{ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -62,49 +94,11 @@ var map_ParentReference = map[string]string{ "resource": "Resource is the resource of the object being referenced.", "namespace": "Namespace is the namespace of the object being referenced.", "name": "Name is the name of the object being referenced.", + "uid": "UID is the uid of the object being referenced.", } func (ParentReference) SwaggerDoc() map[string]string { return map_ParentReference } -var map_ServiceCIDR = map[string]string{ - "": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "status represents the current state of the ServiceCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ServiceCIDR) SwaggerDoc() map[string]string { - return map_ServiceCIDR -} - -var map_ServiceCIDRList = map[string]string{ - "": "ServiceCIDRList contains a list of ServiceCIDR objects.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ServiceCIDRs.", -} - -func (ServiceCIDRList) SwaggerDoc() map[string]string { - return map_ServiceCIDRList -} - -var map_ServiceCIDRSpec = map[string]string{ - "": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.", - "cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", -} - -func (ServiceCIDRSpec) SwaggerDoc() map[string]string { - return map_ServiceCIDRSpec -} - -var map_ServiceCIDRStatus = map[string]string{ - "": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", - "conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state", -} - -func (ServiceCIDRStatus) SwaggerDoc() map[string]string { - return map_ServiceCIDRStatus -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index 5c8f697ba36..97db2eacc95 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -22,12 +22,12 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddress) DeepCopyInto(out *IPAddress) { +func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -35,18 +35,18 @@ func (in *IPAddress) DeepCopyInto(out *IPAddress) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. -func (in *IPAddress) DeepCopy() *IPAddress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. +func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { if in == nil { return nil } - out := new(IPAddress) + out := new(ClusterCIDR) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddress) DeepCopyObject() runtime.Object { +func (in *ClusterCIDR) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -54,13 +54,13 @@ func (in *IPAddress) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { +func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]IPAddress, len(*in)) + *out = make([]ClusterCIDR, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -68,18 +68,18 @@ func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. -func (in *IPAddressList) DeepCopy() *IPAddressList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. +func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { if in == nil { return nil } - out := new(IPAddressList) + out := new(ClusterCIDRList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IPAddressList) DeepCopyObject() runtime.Object { +func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -87,64 +87,47 @@ func (in *IPAddressList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { +func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { *out = *in - if in.ParentRef != nil { - in, out := &in.ParentRef, &out.ParentRef - *out = new(ParentReference) - **out = **in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. -func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { - if in == nil { - return nil - } - out := new(IPAddressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ParentReference) DeepCopyInto(out *ParentReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. -func (in *ParentReference) DeepCopy() *ParentReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. +func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { if in == nil { return nil } - out := new(ParentReference) + out := new(ClusterCIDRSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) { +func (in *IPAddress) DeepCopyInto(out *IPAddress) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR. -func (in *ServiceCIDR) DeepCopy() *ServiceCIDR { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress. 
+func (in *IPAddress) DeepCopy() *IPAddress { if in == nil { return nil } - out := new(ServiceCIDR) + out := new(IPAddress) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDR) DeepCopyObject() runtime.Object { +func (in *IPAddress) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -152,13 +135,13 @@ func (in *ServiceCIDR) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { +func (in *IPAddressList) DeepCopyInto(out *IPAddressList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]ServiceCIDR, len(*in)) + *out = make([]IPAddress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -166,18 +149,18 @@ func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList. -func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList. +func (in *IPAddressList) DeepCopy() *IPAddressList { if in == nil { return nil } - out := new(ServiceCIDRList) + out := new(IPAddressList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { +func (in *IPAddressList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -185,45 +168,38 @@ func (in *ServiceCIDRList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) { +func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) { *out = *in - if in.CIDRs != nil { - in, out := &in.CIDRs, &out.CIDRs - *out = make([]string, len(*in)) - copy(*out, *in) + if in.ParentRef != nil { + in, out := &in.ParentRef, &out.ParentRef + *out = new(ParentReference) + **out = **in } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec. -func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec. +func (in *IPAddressSpec) DeepCopy() *IPAddressSpec { if in == nil { return nil } - out := new(ServiceCIDRSpec) + out := new(IPAddressSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) { +func (in *ParentReference) DeepCopyInto(out *ParentReference) { *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus. 
-func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference. +func (in *ParentReference) DeepCopy() *ParentReference { if in == nil { return nil } - out := new(ServiceCIDRStatus) + out := new(ParentReference) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index 714e7b6253b..60438ba59fc 100644 --- a/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -23,72 +23,72 @@ package v1alpha1 // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { - return 1, 27 +func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 25 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { - return 1, 30 +func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 28 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *IPAddress) APILifecycleRemoved() (major, minor int) { - return 1, 33 +func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 31 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { - return 1, 27 +func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 25 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { - return 1, 30 +func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 28 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
-func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { - return 1, 33 +func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 31 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) { +func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) { +func (in *IPAddress) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) { +func (in *IPAddress) APILifecycleRemoved() (major, minor int) { return 1, 33 } // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) { +func (in *IPAddressList) APILifecycleIntroduced() (major, minor int) { return 1, 27 } // APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) { +func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) { return 1, 30 } // APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) { +func (in *IPAddressList) APILifecycleRemoved() (major, minor int) { return 1, 33 } diff --git a/vendor/k8s.io/api/policy/v1/doc.go b/vendor/k8s.io/api/policy/v1/doc.go index 177cdf52368..b46af58e43c 100644 --- a/vendor/k8s.io/api/policy/v1/doc.go +++ b/vendor/k8s.io/api/policy/v1/doc.go @@ -19,6 +19,6 @@ limitations under the License. // +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. 
package v1 // import "k8s.io/api/policy/v1" diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 76da54b4c73..9e9c7d13abc 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -20,6 +20,6 @@ limitations under the License. // +k8s:prerelease-lifecycle-gen=true // Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index efba41b3fda..0b75d641541 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -26,6 +26,8 @@ import ( proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" @@ -47,10 +49,94 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} 
+func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + func (m *Eviction) Reset() { *m = Eviction{} } func (*Eviction) ProtoMessage() {} func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} + return fileDescriptor_014060e454a820dc, []int{3} } func (m *Eviction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -75,10 +161,94 @@ func (m *Eviction) XXX_DiscardUnknown() { var xxx_messageInfo_Eviction proto.InternalMessageInfo +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } func (*PodDisruptionBudget) ProtoMessage() {} func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} + return 
fileDescriptor_014060e454a820dc, []int{7} } func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -106,7 +276,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } func (*PodDisruptionBudgetList) ProtoMessage() {} func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} + return fileDescriptor_014060e454a820dc, []int{8} } func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -134,7 +304,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } func (*PodDisruptionBudgetSpec) ProtoMessage() {} func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} + return fileDescriptor_014060e454a820dc, []int{9} } func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -162,7 +332,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} + return fileDescriptor_014060e454a820dc, []int{10} } func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,13 +357,251 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = 
PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo + +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m 
*RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") + 
proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } func init() { @@ -201,64 +609,132 @@ func init() { } var fileDescriptor_014060e454a820dc = []byte{ - // 857 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0xc7, 0x45, 0xcb, 0x72, 0x9d, 0xad, 0x24, 0xb8, 0xdb, 0x2f, 0x5b, 0x07, 0x2a, 0xd0, 0x29, - 0x28, 0xd0, 0x65, 0x9d, 0x04, 0x85, 0xd1, 0x43, 0x9b, 0x30, 0x32, 0xd2, 0x14, 0x31, 0x6c, 0xac, - 0x9c, 0x4b, 0x91, 0x02, 0x5d, 0x91, 0x13, 0x69, 0x2b, 0x92, 0x4b, 0x70, 0x97, 0x6c, 0x74, 0xcb, - 0xa1, 0x0f, 0xd0, 0xf7, 0xe8, 0x83, 0xd4, 0x87, 0x1e, 0xd2, 0x5b, 0xd0, 0x83, 0x50, 0xb3, 0x6f, - 0xd1, 0x53, 0xc1, 0x25, 0xf5, 0x41, 0x7d, 0x34, 0x4a, 0x0e, 0xb9, 0x71, 0x67, 0xe6, 0xff, 0x1b, - 0xce, 0xc7, 0x52, 0x42, 0xf6, 0xe8, 0x44, 0x12, 0x2e, 0xac, 0x51, 0xdc, 0x87, 0x28, 0x00, 0x05, - 0xd2, 0x4a, 0x20, 0x70, 0x45, 0x64, 0x15, 0x0e, 0x16, 0x72, 0x2b, 0x14, 0x1e, 0x77, 0xc6, 0x56, - 0x72, 0xdc, 0x07, 0xc5, 0x8e, 0xad, 0x01, 0x04, 0x10, 0x31, 0x05, 0x2e, 0x09, 0x23, 0xa1, 0x04, - 0x3e, 0xca, 0x43, 0x09, 0x0b, 0x39, 0xc9, 0x43, 0x49, 0x11, 0xda, 0xfa, 0x7c, 0xc0, 0xd5, 0x30, - 0xee, 0x13, 0x47, 0xf8, 0xd6, 0x40, 0x0c, 0x84, 0xa5, 0x15, 0xfd, 0xf8, 0x99, 0x3e, 0xe9, 0x83, - 0x7e, 0xca, 0x49, 0xad, 0xbb, 0xf3, 0xa4, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0xa3, - 0x41, 0x66, 0x90, 0x96, 0x0f, 0x8a, 0x59, 0xc9, 0x4a, 0xfe, 0x96, 0xb5, 0x49, 0x15, 0xc5, 0x81, - 0xe2, 0x3e, 0xac, 0x08, 0xbe, 0x7c, 0x9d, 0x40, 0x3a, 0x43, 0xf0, 0xd9, 0x8a, 0xee, 0xce, 0x26, - 0x5d, 0xac, 0xb8, 0x67, 0xf1, 0x40, 0x49, 0x15, 0x2d, 0x8b, 0x3a, 0x7f, 0x19, 0x68, 0xff, 0x34, - 0xe1, 0x8e, 0xe2, 0x22, 0xc0, 0x3f, 0xa2, 0xfd, 0xac, 0x0a, 0x97, 0x29, 0x76, 0x68, 0xdc, 0x34, - 0x6e, 0xbd, 0x7f, 0xfb, 0x0b, 0x32, 0xef, 0xde, 0x0c, 0x4a, 0xc2, 0xd1, 0x20, 0x33, 0x48, 0x92, - 0x45, 0x93, 0xe4, 0x98, 0x9c, 0xf7, 0x7f, 0x02, 0x47, 0x9d, 0x81, 0x62, 0x36, 0xbe, 0x9a, 0xb4, - 0x2b, 0xe9, 0xa4, 0x8d, 0xe6, 0x36, 0x3a, 0xa3, 0x62, 0x0f, 0x35, 0x5c, 0xf0, 0x40, 0xc1, 0x79, - 0x98, 0x65, 0x94, 0x87, 0x3b, 0x3a, 0xcd, 0x9d, 0xed, 0xd2, 0x74, 0x17, 0xa5, 0xf6, 0x07, 0xe9, - 0xa4, 0xdd, 0x28, 0x99, 0x68, 0x19, 0xde, 0xf9, 0x6d, 0x07, 0x7d, 0x78, 0x21, 0xdc, 0x2e, 0x97, - 0x51, 0xac, 0x4d, 0x76, 0xec, 0x0e, 0x40, 0xbd, 0x83, 0x3a, 0x2f, 0xd1, 0xae, 0x0c, 0xc1, 0x29, - 0xca, 0xbb, 0x4d, 0x36, 0xee, 0x20, 0x59, 0xf3, 0x7e, 0xbd, 0x10, 0x1c, 0xbb, 0x5e, 0xf0, 0x77, - 0xb3, 0x13, 0xd5, 0x34, 0xfc, 0x14, 0xed, 0x49, 0xc5, 0x54, 0x2c, 0x0f, 0xab, 0x9a, 0x7b, 0xf7, - 0x0d, 0xb9, 0x5a, 0x6b, 0x37, 0x0b, 0xf2, 0x5e, 0x7e, 0xa6, 0x05, 0xb3, 0xf3, 0x87, 0x81, 0x3e, - 0x5d, 0xa3, 0x7a, 0xcc, 0xa5, 0xc2, 0x4f, 0x57, 0x3a, 0x46, 0xb6, 0xeb, 0x58, 0xa6, 0xd6, 0xfd, - 0x3a, 0x28, 0xb2, 0xee, 0x4f, 0x2d, 0x0b, 0xdd, 0xea, 0xa1, 0x1a, 0x57, 0xe0, 0x67, 
0xdb, 0x50, - 0x5d, 0x42, 0x6f, 0x51, 0x96, 0xdd, 0x28, 0xd0, 0xb5, 0x47, 0x19, 0x84, 0xe6, 0xac, 0xce, 0x9f, - 0xd5, 0xb5, 0xe5, 0x64, 0xed, 0xc4, 0xcf, 0x50, 0xdd, 0xe7, 0xc1, 0xfd, 0x84, 0x71, 0x8f, 0xf5, - 0x3d, 0x78, 0xed, 0x12, 0x64, 0x37, 0x88, 0xe4, 0x37, 0x88, 0x3c, 0x0a, 0xd4, 0x79, 0xd4, 0x53, - 0x11, 0x0f, 0x06, 0xf6, 0x41, 0x3a, 0x69, 0xd7, 0xcf, 0x16, 0x48, 0xb4, 0xc4, 0xc5, 0x3f, 0xa0, - 0x7d, 0x09, 0x1e, 0x38, 0x4a, 0x44, 0x6f, 0xb6, 0xe9, 0x8f, 0x59, 0x1f, 0xbc, 0x5e, 0x21, 0xb5, - 0xeb, 0x59, 0xdf, 0xa6, 0x27, 0x3a, 0x43, 0x62, 0x0f, 0x35, 0x7d, 0xf6, 0xfc, 0x49, 0xc0, 0x66, - 0x85, 0x54, 0xdf, 0xb2, 0x10, 0x9c, 0x4e, 0xda, 0xcd, 0xb3, 0x12, 0x8b, 0x2e, 0xb1, 0xf1, 0x0b, - 0x03, 0xb5, 0xe2, 0x60, 0x08, 0xcc, 0x53, 0xc3, 0xf1, 0x85, 0x70, 0xa7, 0x9f, 0x8d, 0x0b, 0x3d, - 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9, 0x18, 0xf5, - 0xef, 0xa4, 0x6d, 0x6e, 0xf6, 0x5e, 0x8e, 0x43, 0xa0, 0xff, 0x93, 0xa3, 0xf3, 0x7b, 0x0d, 0x1d, - 0x6d, 0x5c, 0x6c, 0xfc, 0x1d, 0xc2, 0xa2, 0x2f, 0x21, 0x4a, 0xc0, 0x7d, 0x98, 0x7f, 0xe6, 0xb8, - 0x08, 0xf4, 0x6c, 0xab, 0x76, 0xab, 0xd8, 0x11, 0x7c, 0xbe, 0x12, 0x41, 0xd7, 0xa8, 0xf0, 0x2f, - 0x06, 0x6a, 0xb8, 0x79, 0x1a, 0x70, 0x2f, 0x84, 0x3b, 0xdd, 0xcd, 0x87, 0x6f, 0x73, 0xe5, 0x48, - 0x77, 0x91, 0x74, 0x1a, 0xa8, 0x68, 0x6c, 0x7f, 0x5c, 0xbc, 0x50, 0xa3, 0xe4, 0xa3, 0xe5, 0xa4, - 0x59, 0x49, 0xee, 0x0c, 0x29, 0xef, 0x7b, 0x9e, 0xf8, 0x19, 0x5c, 0x3d, 0xe5, 0xda, 0xbc, 0xa4, - 0xee, 0x4a, 0x04, 0x5d, 0xa3, 0xc2, 0x5f, 0xa3, 0xa6, 0x13, 0x47, 0x11, 0x04, 0xea, 0xdb, 0xbc, - 0xbf, 0x7a, 0x64, 0x35, 0xfb, 0x93, 0x82, 0xd3, 0x7c, 0x50, 0xf2, 0xd2, 0xa5, 0xe8, 0x4c, 0xef, - 0x82, 0xe4, 0x11, 0xb8, 0x53, 0x7d, 0xad, 0xac, 0xef, 0x96, 0xbc, 0x74, 0x29, 0x1a, 0x9f, 0xa0, - 0x3a, 0x3c, 0x0f, 0xc1, 0x99, 0x36, 0x74, 0x4f, 0xab, 0x3f, 0x2a, 0xd4, 0xf5, 0xd3, 0x05, 0x1f, - 0x2d, 0x45, 0x62, 0x07, 0x21, 0x47, 0x04, 0x2e, 0xcf, 0x7f, 0x32, 0xde, 0xd3, 0x83, 0xb0, 0xb6, - 0xbb, 0x48, 0x0f, 0xa6, 0xba, 0xf9, 0x07, 0x7b, 0x66, 0x92, 0x74, 0x01, 0xdb, 0xf2, 0x10, 0x5e, - 0x1d, 0x13, 0x3e, 0x40, 0xd5, 0x11, 0x8c, 0xf5, 0x12, 0xdd, 0xa0, 0xd9, 0x23, 0xbe, 0x87, 0x6a, - 0x09, 0xf3, 0x62, 0x28, 0x2e, 0xf4, 0x67, 0xdb, 0xbd, 0xc7, 0x25, 0xf7, 0x81, 0xe6, 0xc2, 0xaf, - 0x76, 0x4e, 0x0c, 0xfb, 0x9b, 0xab, 0x6b, 0xb3, 0xf2, 0xf2, 0xda, 0xac, 0xbc, 0xba, 0x36, 0x2b, - 0x2f, 0x52, 0xd3, 0xb8, 0x4a, 0x4d, 0xe3, 0x65, 0x6a, 0x1a, 0xaf, 0x52, 0xd3, 0xf8, 0x3b, 0x35, - 0x8d, 0x5f, 0xff, 0x31, 0x2b, 0xdf, 0x1f, 0x6d, 0xfc, 0x9b, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x96, 0x9a, 0x3a, 0xb5, 0x1b, 0x09, 0x00, 0x00, + // 1946 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x5b, 0x73, 0xdb, 0xc6, + 0x15, 0x16, 0x4c, 0x5d, 0xa8, 0xd5, 0xc5, 0xe2, 0xea, 0x62, 0x48, 0x69, 0x08, 0x07, 0x99, 0xe9, + 0xb8, 0x69, 0x0a, 0xc6, 0xb2, 0xe3, 0x7a, 0x9a, 0x5e, 0x2c, 0x88, 0x92, 0xad, 0x8c, 0x65, 0xb1, + 0x4b, 0x2b, 0xd3, 0x76, 0xdc, 0x4e, 0x97, 0xc0, 0x8a, 0x44, 0x04, 0x02, 0x28, 0x76, 0xc1, 0x88, + 0x6f, 0x79, 0xe8, 0x43, 0x1f, 0xfb, 0x07, 0x32, 0xfd, 0x01, 0x9d, 0x3e, 0xf5, 0x47, 0xd4, 0x99, + 0xe9, 0x74, 0xd2, 0xb7, 0x4c, 0x1f, 0x38, 0x35, 0xfb, 0x2f, 0xfc, 0xd4, 0xc1, 0x72, 0x01, 0x12, + 0x37, 0xd2, 0xce, 0x8c, 0xfd, 0x46, 0xec, 0xf9, 0xbe, 0xef, 0xec, 0x9e, 0xdd, 0x3d, 0x67, 0x77, + 0x09, 0xf4, 0xcb, 0xfb, 0x54, 0xb3, 0xdc, 0xda, 0x65, 0xd0, 0x22, 0xbe, 0x43, 0x18, 0xa1, 0xb5, + 0x1e, 0x71, 0x4c, 0xd7, 0xaf, 0x09, 0x03, 0xf6, 0xac, 0x9a, 0xe7, 0xda, 0x96, 0xd1, 0xaf, 0xf5, + 0x6e, 
0xb7, 0x08, 0xc3, 0xb7, 0x6b, 0x6d, 0xe2, 0x10, 0x1f, 0x33, 0x62, 0x6a, 0x9e, 0xef, 0x32, + 0x17, 0xee, 0x8e, 0xa0, 0x1a, 0xf6, 0x2c, 0x6d, 0x04, 0xd5, 0x04, 0x74, 0xef, 0x47, 0x6d, 0x8b, + 0x75, 0x82, 0x96, 0x66, 0xb8, 0xdd, 0x5a, 0xdb, 0x6d, 0xbb, 0x35, 0xce, 0x68, 0x05, 0x17, 0xfc, + 0x8b, 0x7f, 0xf0, 0x5f, 0x23, 0xa5, 0x3d, 0x75, 0xc2, 0xa9, 0xe1, 0xfa, 0xa4, 0xd6, 0xcb, 0x78, + 0xdb, 0xbb, 0x3b, 0xc6, 0x74, 0xb1, 0xd1, 0xb1, 0x1c, 0xe2, 0xf7, 0x6b, 0xde, 0x65, 0x3b, 0x6c, + 0xa0, 0xb5, 0x2e, 0x61, 0x38, 0x8f, 0x55, 0x2b, 0x62, 0xf9, 0x81, 0xc3, 0xac, 0x2e, 0xc9, 0x10, + 0xee, 0xcd, 0x22, 0x50, 0xa3, 0x43, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0x98, 0x65, 0xd7, + 0x2c, 0x87, 0x51, 0xe6, 0xa7, 0x49, 0xea, 0x5d, 0xb0, 0x71, 0x60, 0xdb, 0xee, 0x17, 0xc4, 0x3c, + 0x6c, 0x9e, 0xd4, 0x7d, 0xab, 0x47, 0x7c, 0x78, 0x13, 0xcc, 0x3b, 0xb8, 0x4b, 0x64, 0xe9, 0xa6, + 0x74, 0x6b, 0x59, 0x5f, 0x7d, 0x3e, 0x50, 0xe6, 0x86, 0x03, 0x65, 0xfe, 0x09, 0xee, 0x12, 0xc4, + 0x2d, 0xea, 0x27, 0xa0, 0x22, 0x58, 0xc7, 0x36, 0xb9, 0xfa, 0xcc, 0xb5, 0x83, 0x2e, 0x81, 0xdf, + 0x07, 0x8b, 0x26, 0x17, 0x10, 0xc4, 0x75, 0x41, 0x5c, 0x1c, 0xc9, 0x22, 0x61, 0x55, 0x29, 0xb8, + 0x2e, 0xc8, 0x8f, 0x5c, 0xca, 0x1a, 0x98, 0x75, 0xe0, 0x3e, 0x00, 0x1e, 0x66, 0x9d, 0x86, 0x4f, + 0x2e, 0xac, 0x2b, 0x41, 0x87, 0x82, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0xec, + 0x13, 0x6c, 0x9e, 0x39, 0x76, 0x5f, 0xbe, 0x76, 0x53, 0xba, 0x55, 0xd6, 0x37, 0x04, 0xa3, 0x8c, + 0x44, 0x3b, 0x8a, 0x11, 0xea, 0x7f, 0x24, 0x50, 0x3e, 0xea, 0x59, 0x06, 0xb3, 0x5c, 0x07, 0xfe, + 0x1e, 0x94, 0xc3, 0xd9, 0x32, 0x31, 0xc3, 0xdc, 0xd9, 0xca, 0xfe, 0x47, 0xda, 0x78, 0x25, 0xc5, + 0xc1, 0xd3, 0xbc, 0xcb, 0x76, 0xd8, 0x40, 0xb5, 0x10, 0xad, 0xf5, 0x6e, 0x6b, 0x67, 0xad, 0xcf, + 0x89, 0xc1, 0x4e, 0x09, 0xc3, 0xe3, 0xee, 0x8d, 0xdb, 0x50, 0xac, 0x0a, 0x6d, 0xb0, 0x66, 0x12, + 0x9b, 0x30, 0x72, 0xe6, 0x85, 0x1e, 0x29, 0xef, 0xe1, 0xca, 0xfe, 0x9d, 0x57, 0x73, 0x53, 0x9f, + 0xa4, 0xea, 0x95, 0xe1, 0x40, 0x59, 0x4b, 0x34, 0xa1, 0xa4, 0xb8, 0xfa, 0x95, 0x04, 0x76, 0x8e, + 0x9b, 0x0f, 0x7d, 0x37, 0xf0, 0x9a, 0x2c, 0x9c, 0xdd, 0x76, 0x5f, 0x98, 0xe0, 0x8f, 0xc1, 0xbc, + 0x1f, 0xd8, 0xd1, 0x5c, 0xbe, 0x1f, 0xcd, 0x25, 0x0a, 0x6c, 0xf2, 0x72, 0xa0, 0x6c, 0xa6, 0x58, + 0x4f, 0xfb, 0x1e, 0x41, 0x9c, 0x00, 0x3f, 0x05, 0x8b, 0x3e, 0x76, 0xda, 0x24, 0xec, 0x7a, 0xe9, + 0xd6, 0xca, 0xbe, 0xaa, 0x15, 0xee, 0x35, 0xed, 0xa4, 0x8e, 0x42, 0xe8, 0x78, 0xc6, 0xf9, 0x27, + 0x45, 0x42, 0x41, 0x3d, 0x05, 0x6b, 0x7c, 0xaa, 0x5d, 0x9f, 0x71, 0x0b, 0x7c, 0x17, 0x94, 0xba, + 0x96, 0xc3, 0x3b, 0xb5, 0xa0, 0xaf, 0x08, 0x56, 0xe9, 0xd4, 0x72, 0x50, 0xd8, 0xce, 0xcd, 0xf8, + 0x8a, 0xc7, 0x6c, 0xd2, 0x8c, 0xaf, 0x50, 0xd8, 0xae, 0x3e, 0x04, 0x4b, 0xc2, 0xe3, 0xa4, 0x50, + 0x69, 0xba, 0x50, 0x29, 0x47, 0xe8, 0xaf, 0xd7, 0xc0, 0x66, 0xc3, 0x35, 0xeb, 0x16, 0xf5, 0x03, + 0x1e, 0x2f, 0x3d, 0x30, 0xdb, 0x84, 0xbd, 0x85, 0xf5, 0xf1, 0x14, 0xcc, 0x53, 0x8f, 0x18, 0x62, + 0x59, 0xec, 0x4f, 0x89, 0x6d, 0x4e, 0xff, 0x9a, 0x1e, 0x31, 0xc6, 0xdb, 0x32, 0xfc, 0x42, 0x5c, + 0x0d, 0x3e, 0x03, 0x8b, 0x94, 0x61, 0x16, 0x50, 0xb9, 0xc4, 0x75, 0xef, 0xbe, 0xa6, 0x2e, 0xe7, + 0x8e, 0x67, 0x71, 0xf4, 0x8d, 0x84, 0xa6, 0xfa, 0x4f, 0x09, 0xdc, 0xc8, 0x61, 0x3d, 0xb6, 0x28, + 0x83, 0xcf, 0x32, 0x11, 0xd3, 0x5e, 0x2d, 0x62, 0x21, 0x9b, 0xc7, 0x2b, 0xde, 0xbc, 0x51, 0xcb, + 0x44, 0xb4, 0x9a, 0x60, 0xc1, 0x62, 0xa4, 0x1b, 0x2d, 0x45, 0xed, 0xf5, 0x86, 0xa5, 0xaf, 0x09, + 0xe9, 0x85, 0x93, 0x50, 0x04, 0x8d, 0xb4, 0xd4, 0x7f, 0x97, 0x72, 0x87, 0x13, 0x86, 0x13, 0x5e, + 0x80, 0xd5, 0xae, 0xe5, 0x1c, 
0xf4, 0xb0, 0x65, 0xe3, 0x96, 0xd8, 0x3d, 0xd3, 0x16, 0x41, 0x98, + 0x61, 0xb5, 0x51, 0x86, 0xd5, 0x4e, 0x1c, 0x76, 0xe6, 0x37, 0x99, 0x6f, 0x39, 0x6d, 0x7d, 0x63, + 0x38, 0x50, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x16, 0x94, 0x29, 0xb1, 0x89, 0xc1, + 0x5c, 0xff, 0xf5, 0x32, 0xc4, 0x63, 0xdc, 0x22, 0x76, 0x53, 0x50, 0xf5, 0xd5, 0x30, 0x6e, 0xd1, + 0x17, 0x8a, 0x25, 0xa1, 0x0d, 0xd6, 0xbb, 0xf8, 0xea, 0xdc, 0xc1, 0xf1, 0x40, 0x4a, 0xdf, 0x71, + 0x20, 0x70, 0x38, 0x50, 0xd6, 0x4f, 0x13, 0x5a, 0x28, 0xa5, 0x0d, 0xbf, 0x94, 0xc0, 0x5e, 0xe0, + 0x74, 0x08, 0xb6, 0x59, 0xa7, 0xdf, 0x70, 0xcd, 0x28, 0xdd, 0x36, 0xf8, 0x0c, 0xc9, 0xf3, 0x3c, + 0x03, 0x3d, 0x18, 0x0e, 0x94, 0xbd, 0xf3, 0x42, 0xd4, 0xcb, 0x81, 0x52, 0x2d, 0xb6, 0xf2, 0xf4, + 0x34, 0xc5, 0x87, 0xfa, 0x8f, 0x05, 0xb0, 0x5b, 0xb8, 0xb0, 0xe1, 0xa7, 0x00, 0xba, 0x2d, 0x4a, + 0xfc, 0x1e, 0x31, 0x1f, 0x8e, 0xca, 0xa0, 0xe5, 0x46, 0xb9, 0x63, 0x4f, 0xac, 0x11, 0x78, 0x96, + 0x41, 0xa0, 0x1c, 0x16, 0xfc, 0xa3, 0x04, 0xd6, 0xcc, 0x91, 0x1b, 0x62, 0x36, 0x5c, 0x33, 0x5a, + 0x9b, 0x0f, 0xbf, 0xcb, 0x96, 0xd3, 0xea, 0x93, 0x4a, 0x47, 0x0e, 0xf3, 0xfb, 0xfa, 0xb6, 0xe8, + 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xc3, 0x21, 0x99, 0xb1, 0x24, 0x15, 0x65, 0x95, 0xcf, 0xf2, + 0xc2, 0x78, 0x48, 0xf5, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0xcf, 0xc1, 0xba, 0x11, 0xf8, 0x3e, 0x71, + 0xd8, 0xa3, 0x51, 0x7c, 0xf9, 0x94, 0x2d, 0xe8, 0x3b, 0x42, 0x67, 0xfd, 0x30, 0x61, 0x45, 0x29, + 0x74, 0xc8, 0x37, 0x09, 0xb5, 0x7c, 0x62, 0x46, 0xfc, 0x85, 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, + 0x1a, 0xde, 0x07, 0xab, 0xe4, 0xca, 0x23, 0x46, 0x14, 0xd0, 0x45, 0xce, 0xde, 0x12, 0xec, 0xd5, + 0xa3, 0x09, 0x1b, 0x4a, 0x20, 0xa1, 0x01, 0x80, 0xe1, 0x3a, 0xa6, 0x35, 0x2a, 0xb5, 0x4b, 0x7c, + 0x22, 0x6a, 0xaf, 0xb6, 0x91, 0x0e, 0x23, 0xde, 0x38, 0x61, 0xc7, 0x4d, 0x14, 0x4d, 0xc8, 0xee, + 0xd9, 0x00, 0x66, 0xa7, 0x09, 0x6e, 0x80, 0xd2, 0x25, 0xe9, 0x8f, 0xca, 0x2b, 0x0a, 0x7f, 0xc2, + 0x07, 0x60, 0xa1, 0x87, 0xed, 0x80, 0x88, 0x0d, 0xfd, 0xc1, 0xab, 0xf5, 0xe3, 0xa9, 0xd5, 0x25, + 0x68, 0x44, 0xfc, 0xc9, 0xb5, 0xfb, 0x92, 0xfa, 0xb5, 0x04, 0x2a, 0x0d, 0xd7, 0x6c, 0x12, 0x23, + 0xf0, 0x2d, 0xd6, 0x1f, 0xad, 0xef, 0xb7, 0x50, 0x98, 0x50, 0xa2, 0x30, 0x7d, 0x34, 0x7d, 0x35, + 0x27, 0x7b, 0x57, 0x54, 0x96, 0xd4, 0xe7, 0x12, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0xd9, 0xf8, 0x65, + 0xb2, 0x6c, 0x7c, 0xf8, 0x3a, 0x83, 0x29, 0x28, 0x1a, 0x5f, 0x57, 0x72, 0x86, 0xc2, 0x4b, 0x46, + 0x78, 0x84, 0xf5, 0xad, 0x9e, 0x65, 0x93, 0x36, 0x31, 0xf9, 0x60, 0xca, 0x13, 0x47, 0xd8, 0xd8, + 0x82, 0x26, 0x50, 0x90, 0x82, 0x1d, 0x93, 0x5c, 0xe0, 0xc0, 0x66, 0x07, 0xa6, 0x79, 0x88, 0x3d, + 0xdc, 0xb2, 0x6c, 0x8b, 0x59, 0xe2, 0xcc, 0xb5, 0xac, 0x7f, 0x32, 0x1c, 0x28, 0x3b, 0xf5, 0x5c, + 0xc4, 0xcb, 0x81, 0xf2, 0x6e, 0xf6, 0xca, 0xa2, 0xc5, 0x90, 0x3e, 0x2a, 0x90, 0x86, 0x7d, 0x20, + 0xfb, 0xe4, 0x0f, 0x41, 0xb8, 0xf3, 0xea, 0xbe, 0xeb, 0x25, 0xdc, 0x96, 0xb8, 0xdb, 0x9f, 0x0d, + 0x07, 0x8a, 0x8c, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0xcf, 0xc1, 0x26, 0x16, 0x97, 0x8d, + 0x49, 0xaf, 0xf3, 0xdc, 0xeb, 0xfd, 0xe1, 0x40, 0xd9, 0x3c, 0xc8, 0x9a, 0x67, 0x3b, 0xcc, 0x13, + 0x85, 0x35, 0xb0, 0xd4, 0xe3, 0xf7, 0x12, 0x2a, 0x2f, 0x70, 0xfd, 0xed, 0xe1, 0x40, 0x59, 0x1a, + 0x5d, 0x55, 0x42, 0xcd, 0xc5, 0xe3, 0x26, 0x2f, 0x27, 0x11, 0x0a, 0x7e, 0x0c, 0x56, 0x3a, 0x2e, + 0x65, 0x4f, 0x08, 0xfb, 0xc2, 0xf5, 0x2f, 0x79, 0xf6, 0x29, 0xeb, 0x9b, 0x62, 0x06, 0x57, 0x1e, + 0x8d, 0x4d, 0x68, 0x12, 0x07, 0x7f, 0x0d, 0x96, 0x3b, 0xe2, 0x6c, 0x1b, 0xa5, 0x9e, 0x5b, 0x53, + 0x16, 0x5a, 0xe2, 0x1c, 0xac, 0x57, 0x84, 0xfc, 0x72, 
0xd4, 0x4c, 0xd1, 0x58, 0x0d, 0xfe, 0x00, + 0x2c, 0xf1, 0x8f, 0x93, 0xba, 0x5c, 0xe6, 0xbd, 0xb9, 0x2e, 0xe0, 0x4b, 0x8f, 0x46, 0xcd, 0x28, + 0xb2, 0x47, 0xd0, 0x93, 0xc6, 0xa1, 0xbc, 0x9c, 0x85, 0x9e, 0x34, 0x0e, 0x51, 0x64, 0x87, 0xcf, + 0xc0, 0x12, 0x25, 0x8f, 0x2d, 0x27, 0xb8, 0x92, 0x01, 0xdf, 0x72, 0xb7, 0xa7, 0x74, 0xb7, 0x79, + 0xc4, 0x91, 0xa9, 0x5b, 0xc5, 0x58, 0x5d, 0xd8, 0x51, 0x24, 0x09, 0x4d, 0xb0, 0xec, 0x07, 0xce, + 0x01, 0x3d, 0xa7, 0xc4, 0x97, 0x57, 0x32, 0x47, 0x9a, 0xb4, 0x3e, 0x8a, 0xb0, 0x69, 0x0f, 0x71, + 0x64, 0x62, 0x04, 0x1a, 0x0b, 0x43, 0x13, 0x00, 0xfe, 0xc1, 0x2f, 0x2f, 0xf2, 0xce, 0xcc, 0xc3, + 0x2e, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf6, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc, 0x93, 0x04, + 0x20, 0x0d, 0x3c, 0xcf, 0x26, 0x5d, 0xe2, 0x30, 0x6c, 0xf3, 0x56, 0x2a, 0xaf, 0x72, 0x77, 0x3f, + 0x9d, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0xae, 0xcd, 0x59, 0x28, 0xca, 0xf1, 0x19, 0x4e, 0xda, + 0x85, 0x18, 0xed, 0xda, 0xcc, 0x49, 0xcb, 0xbf, 0x0a, 0x8e, 0x27, 0x4d, 0xd8, 0x51, 0x24, 0x09, + 0x3f, 0x03, 0x3b, 0xd1, 0x45, 0x19, 0xb9, 0x2e, 0x3b, 0xb6, 0x6c, 0x42, 0xfb, 0x94, 0x91, 0xae, + 0xbc, 0xce, 0x17, 0x53, 0x55, 0x30, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x28, 0x51, + 0x12, 0x0a, 0x77, 0x68, 0x9c, 0x05, 0x8f, 0xa8, 0x81, 0xed, 0xd1, 0xe9, 0xeb, 0x3a, 0x77, 0xf0, + 0xfe, 0x70, 0xa0, 0x28, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x0a, 0xc8, 0xb8, 0xc8, 0xcf, + 0x06, 0xf7, 0xf3, 0xbd, 0x30, 0xb3, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x0f, 0x6c, 0xe0, 0xe4, 0x93, + 0x05, 0x95, 0x2b, 0x7c, 0xaf, 0x7f, 0x30, 0x65, 0x1e, 0x52, 0xaf, 0x1c, 0xba, 0x2c, 0xc2, 0xb8, + 0x91, 0x32, 0x50, 0x94, 0x51, 0x87, 0x57, 0x00, 0xe2, 0xf4, 0x0b, 0x0b, 0x95, 0xe1, 0xcc, 0x42, + 0x96, 0x79, 0x96, 0x19, 0x2f, 0xb5, 0x8c, 0x89, 0xa2, 0x1c, 0x1f, 0x90, 0x81, 0x0a, 0x4e, 0xbd, + 0x08, 0x51, 0xf9, 0x06, 0x77, 0xfc, 0xc3, 0xd9, 0x8e, 0x63, 0x8e, 0xbe, 0x2b, 0xfc, 0x56, 0xd2, + 0x16, 0x8a, 0xb2, 0x0e, 0xe0, 0x63, 0xb0, 0x25, 0x1a, 0xcf, 0x1d, 0x8a, 0x2f, 0x48, 0xb3, 0x4f, + 0x0d, 0x66, 0x53, 0x79, 0x93, 0xe7, 0x6e, 0x79, 0x38, 0x50, 0xb6, 0x0e, 0x72, 0xec, 0x28, 0x97, + 0x05, 0x1f, 0x80, 0x8d, 0x0b, 0xd7, 0x6f, 0x59, 0xa6, 0x49, 0x9c, 0x48, 0x69, 0x8b, 0x2b, 0x6d, + 0x85, 0xf1, 0x3f, 0x4e, 0xd9, 0x50, 0x06, 0x0d, 0x29, 0xd8, 0x16, 0xca, 0x0d, 0xdf, 0x35, 0x4e, + 0xdd, 0xc0, 0x61, 0x61, 0xb9, 0xa0, 0xf2, 0x76, 0x5c, 0x22, 0xb7, 0x0f, 0xf2, 0x00, 0x2f, 0x07, + 0xca, 0xcd, 0x9c, 0x72, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0x68, 0x83, 0x55, 0xf1, 0xc6, 0x77, 0x68, + 0x63, 0x4a, 0x65, 0x99, 0x6f, 0xf5, 0x7b, 0xd3, 0x13, 0x5b, 0x0c, 0x4f, 0xef, 0x77, 0x7e, 0xf9, + 0x9c, 0x04, 0xa0, 0x84, 0xba, 0xfa, 0x17, 0x09, 0xec, 0x16, 0x26, 0x46, 0x78, 0x2f, 0xf1, 0x70, + 0xa4, 0xa6, 0x1e, 0x8e, 0x60, 0x96, 0xf8, 0x06, 0xde, 0x8d, 0xbe, 0x92, 0x80, 0x5c, 0x54, 0x21, + 0xe0, 0xc7, 0x89, 0x0e, 0xbe, 0x97, 0xea, 0x60, 0x25, 0xc3, 0x7b, 0x03, 0xfd, 0xfb, 0x97, 0x04, + 0xde, 0x99, 0x32, 0x03, 0x71, 0x42, 0x22, 0xe6, 0x24, 0xea, 0x09, 0x0e, 0xb7, 0xb2, 0xc4, 0xd7, + 0xd1, 0x38, 0x21, 0xe5, 0x60, 0x50, 0x21, 0x1b, 0x9e, 0x83, 0x1b, 0x22, 0x1b, 0xa6, 0x6d, 0xfc, + 0xe4, 0xbe, 0xac, 0xbf, 0x33, 0x1c, 0x28, 0x37, 0xea, 0xf9, 0x10, 0x54, 0xc4, 0x55, 0xff, 0x26, + 0x81, 0x9d, 0xfc, 0x92, 0x0f, 0xef, 0x24, 0xc2, 0xad, 0xa4, 0xc2, 0x7d, 0x3d, 0xc5, 0x12, 0xc1, + 0xfe, 0x1d, 0x58, 0x17, 0x07, 0x83, 0xe4, 0x3b, 0x68, 0x22, 0xe8, 0xe1, 0x16, 0x09, 0xcf, 0xf4, + 0x42, 0x22, 0x5a, 0xbe, 0xfc, 0xc9, 0x21, 0xd9, 0x86, 0x52, 0x6a, 0xea, 0xdf, 0x25, 0xf0, 0xde, + 0xcc, 0x62, 0x0b, 0xf5, 0x44, 0xd7, 0xb5, 0x54, 0xd7, 0xab, 0xc5, 0x02, 0x6f, 
0xe6, 0x39, 0x54, + 0xff, 0xc5, 0xf3, 0x17, 0xd5, 0xb9, 0x6f, 0x5e, 0x54, 0xe7, 0xbe, 0x7d, 0x51, 0x9d, 0xfb, 0x72, + 0x58, 0x95, 0x9e, 0x0f, 0xab, 0xd2, 0x37, 0xc3, 0xaa, 0xf4, 0xed, 0xb0, 0x2a, 0xfd, 0x77, 0x58, + 0x95, 0xfe, 0xfc, 0xbf, 0xea, 0xdc, 0x6f, 0x76, 0x0b, 0xff, 0x06, 0xf9, 0x7f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xb4, 0x84, 0x53, 0xfb, 0x3b, 0x19, 0x00, 0x00, } -func (m *Eviction) Marshal() (dAtA []byte, err error) { +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -268,41 +744,233 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.DeleteOptions != nil { - { - size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Eviction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeleteOptions != nil { + { + size, err := 
m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -550,247 +1218,3446 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *Eviction) Size() (n int) { - if m == nil { - return 0 - } + +func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDisruptionBudget) Size() (n int) { - if m == nil { - return 0 +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *PodDisruptionBudgetList) Size() (n int) { - if m == nil { - return 0 - } +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } } - return n + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *PodDisruptionBudgetSpec) Size() (n int) { - if m == nil { - return 0 +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } } - if 
m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.UnhealthyPodEvictionPolicy != nil { - l = len(*m.UnhealthyPodEvictionPolicy) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - if m == nil { - return 0 + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Eviction) String() string { - if this == nil { - return "nil" + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return 
"nil" + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() string { - if this == nil { - return "nil" + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 } - repeatedStringForItems := "[]PodDisruptionBudget{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - repeatedStringForConditions := "[]Condition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - repeatedStringForConditions += "}" - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]v1.Time{" - for _, k := range keysForDisruptedPods { - 
mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 
1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.UnhealthyPodEvictionPolicy != nil { + l = len(*m.UnhealthyPodEvictionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers 
{ + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + 
`}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `UnhealthyPodEvictionPolicy:` + valueToStringGenerated(this.UnhealthyPodEvictionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + } + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + 
fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, 
``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } 
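
The `encodeVarintGenerated` and `sovGenerated` helpers that the `Marshal*` and `Size` methods above lean on implement standard protobuf base-128 varints: seven payload bits per byte, with the high bit set on every byte except the last. Below is a minimal forward-writing sketch of the same encoding; the generated code writes identical bytes at a precomputed offset because it fills the buffer back to front. The names `putVarint`/`varintSize` are illustrative only, not part of the vendored file.

```go
package main

import (
	"fmt"
	"math/bits"
)

// putVarint appends v as a base-128 varint: 7 payload bits per byte,
// high bit set on every byte except the last (the same bytes
// encodeVarintGenerated emits, just written front to back).
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// varintSize predicts the encoded length, matching sovGenerated's
// (bits.Len64(x|1)+6)/7 formula.
func varintSize(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 300} {
		b := putVarint(nil, v)
		fmt.Printf("%4d -> %x (size %d, predicted %d)\n", v, b, len(b), varintSize(v))
	}
}
```
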
+ pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MinAvailable == nil { + m.MinAvailable = &intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) + m.UnhealthyPodEvictionPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]v1.Time) + } + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DisruptedPods[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) + } + m.DisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DisruptionsAllowed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
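
Every `case` in the `Unmarshal` functions in this file starts from the same split of the varint key: the low three bits carry the wire type and the remaining bits carry the field number. A minimal standalone sketch of that split, using `encoding/binary.Uvarint` in place of the hand-rolled shift loop; the sample byte and variable names are illustrative, not taken from the vendored code.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 0x12 encodes field 2 with wire type 2 (length-delimited).
	key, _ := binary.Uvarint([]byte{0x12})
	fieldNum := int32(key >> 3) // the generated code's `int32(wire >> 3)`
	wireType := int(key & 0x7)  // the generated code's `int(wire & 0x7)`
	fmt.Println(fieldNum, wireType) // prints: 2 2
}
```
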
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
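// Wire-type-2 (length-delimited) fields such as defaultAddCapabilities above are decoded by
// reading a varint length, bounds-checking iNdEx+length against the buffer, and slicing.
// A minimal sketch of that pattern, continuing the example file from the previous sketch
// (illustrative only; decodeUvarint is the helper assumed there):
func readLengthDelimited(buf []byte, i int) (payload []byte, next int, err error) {
	l, n, err := decodeUvarint(buf[i:])
	if err != nil {
		return nil, 0, err
	}
	end := i + n + int(l)
	if int(l) < 0 || end < 0 { // mirrors the ErrInvalidLengthGenerated checks
		return nil, 0, fmt.Errorf("invalid length")
	}
	if end > len(buf) { // mirrors the io.ErrUnexpectedEOF checks
		return nil, 0, fmt.Errorf("unexpected EOF")
	}
	return buf[i+n : end], end, nil
}

// Usage note: a payload of "NET_ADMIN" would then be appended to DefaultAddCapabilities as
// k8s_io_api_core_v1.Capability(payload), which is exactly what the generated code does.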
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -817,13 +4684,14 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err } iNdEx = postIndex - case 2: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -850,66 +4718,110 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DeleteOptions == nil { - m.DeleteOptions = &v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
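// The `default:` branches above hand unknown field numbers to skipGenerated instead of
// rejecting them, which is what keeps older decoders compatible with newer message versions.
// A minimal sketch of that skip logic for the standard proto wire types (illustrative only,
// not the vendored skipGenerated implementation; reuses the helpers assumed earlier and
// omits the extra bounds checks the generated code performs for fixed-width fields):
func skipField(buf []byte, i int, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the continuation bit clears
		_, n, err := decodeUvarint(buf[i:])
		return i + n, err
	case 1: // fixed64
		return i + 8, nil
	case 2: // length-delimited: length varint, then that many payload bytes
		_, next, err := readLengthDelimited(buf, i)
		return next, err
	case 5: // fixed32
		return i + 4, nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}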
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -936,13 +4848,16 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -969,13 +4884,14 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1002,7 +4918,10 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1027,7 +4946,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1050,17 +4969,17 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1070,28 +4989,27 @@ 
func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1118,8 +5036,8 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1144,7 +5062,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1167,17 +5085,17 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1187,31 +5105,27 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.MinAvailable == nil { - m.MinAvailable = &intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1238,18 +5152,66 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.Selector == nil { - m.Selector = &v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1259,31 +5221,27 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnhealthyPodEvictionPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1311,8 +5269,8 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := UnhealthyPodEvictionPolicyType(dAtA[iNdEx:postIndex]) - m.UnhealthyPodEvictionPolicy = &s + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -1335,7 +5293,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { +func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1358,17 +5316,17 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := 
int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - m.ObservedGeneration = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1378,14 +5336,27 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1412,168 +5383,68 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]v1.Time) + if m.SELinuxOptions == nil { + m.SELinuxOptions = &v11.SELinuxOptions{} } - var mapkey string - mapvalue := &v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &v1.Time{} - if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.DisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredHealthy |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } - m.ExpectedPods = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1583,14 +5454,27 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 7: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
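// The removed DisruptedPods block above also shows how proto map fields decode: each
// map<string, Time> entry arrives as a small embedded message whose field 1 is the key and
// field 2 is the value. A sketch of the same shape for a map<string, string> entry, reusing
// the helpers assumed earlier (illustrative only; it assumes both key and value are
// length-delimited, as they are for string keys and message values):
func readMapEntry(entry []byte) (key, value string, err error) {
	for i := 0; i < len(entry); {
		tag, n, err := decodeUvarint(entry[i:])
		if err != nil {
			return "", "", err
		}
		payload, next, err := readLengthDelimited(entry, i+n)
		if err != nil {
			return "", "", err
		}
		switch tag >> 3 {
		case 1:
			key = string(payload)
		case 2:
			value = string(payload)
		}
		i = next
	}
	return key, value, nil
}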
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1617,8 +5501,8 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, v1.Condition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index d1409913f1e..16301c236af 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = "proto2"; package k8s.io.api.policy.v1beta1; +import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,35 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/policy/v1beta1"; +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +message AllowedHostPath { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + optional string pathPrefix = 1; + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + optional bool readOnly = 2; +} + // Eviction evicts a pod from its node subject to certain policies and safety constraints. // This is a subresource of Pod. A request to cause such an eviction is // created by POSTing to .../pods//evictions. @@ -42,6 +72,37 @@ message Eviction { optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. 
+ // +optional + repeated IDRange ranges = 2; +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +message IDRange { + // min is the start of the range, inclusive. + optional int64 min = 1; + + // max is the end of the range, inclusive. + optional int64 max = 2; +} + // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { // Standard object's metadata. @@ -177,3 +238,219 @@ message PodDisruptionBudgetStatus { repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 7; } +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +// Deprecated in 1.21. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated PodSecurityPolicy items = 2; +} + +// PodSecurityPolicySpec defines the policy enforced. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + repeated string defaultAddCapabilities = 2; + + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + repeated string requiredDropCapabilities = 3; + + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is an allowlist of volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. 
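// HostPortRange and IDRange above are both closed intervals: min and max are inclusive, so a
// port (or uid/gid) is allowed when it falls inside any listed range. A minimal sketch of that
// containment check (illustrative only; it assumes it sits alongside the types in this
// package, and the real enforcement lived in the PodSecurityPolicy admission plugin):
func portAllowed(port int32, ranges []HostPortRange) bool {
	for _, r := range ranges {
		if port >= r.Min && port <= r.Max {
			return true
		}
	}
	return false
}

// Example: HostPortRange{Min: 8000, Max: 8080} admits 8000, 8043 and 8080, but rejects 8081.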
+ // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + optional RunAsUserStrategyOptions runAsUser = 11; + + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + optional FSGroupStrategyOptions fsGroup = 13; + + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + optional bool defaultAllowPrivilegeEscalation = 15; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + optional bool allowPrivilegeEscalation = 16; + + // allowedHostPaths is an allowlist of host paths. Empty indicates + // that all host paths may be used. + // +optional + repeated AllowedHostPath allowedHostPaths = 17; + + // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; + + // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + repeated string allowedUnsafeSysctls = 19; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. 
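// allowedUnsafeSysctls and forbiddenSysctls entries above are either exact sysctl names or
// prefixes ending in "*" ("foo.*" covers "foo.bar"), with a bare "*" matching everything.
// A minimal sketch of that matching rule (illustrative only; assumes `import "strings"`;
// the authoritative matching lived in the PSP admission code, not in this package):
func sysctlMatches(name, pattern string) bool {
	if pattern == "*" {
		return true
	}
	if strings.HasSuffix(pattern, "*") {
		return strings.HasPrefix(name, strings.TrimSuffix(pattern, "*"))
	}
	return name == pattern
}

// sysctlMatches("net.core.somaxconn", "net.*") == true
// sysctlMatches("kernel.shm_rmid_forced", "net.*") == false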
+ // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + repeated string forbiddenSysctls = 20; + + // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. + // +optional + repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +message SELinuxStrategyOptions { + // rule is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of supplemental groups. 
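// allowedRuntimeClassNames above is an allowlist where "*" (if present) must be the only
// entry and an empty list forces the pod's runtimeClassName to stay unset. A minimal sketch
// of checking a pod's runtimeClassName against those options (illustrative only; assumes it
// sits alongside the RuntimeClassStrategyOptions type in this package and ignores the
// defaulting behaviour of defaultRuntimeClassName):
func runtimeClassAllowed(opts RuntimeClassStrategyOptions, podRuntimeClass *string) bool {
	if podRuntimeClass == nil {
		return true // an unset runtimeClassName is acceptable under every allowlist
	}
	for _, name := range opts.AllowedRuntimeClassNames {
		if name == "*" || name == *podRuntimeClass {
			return true
		}
	}
	return false // includes the empty-list case, which only admits unset runtimeClassName
}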
If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index d77f1304070..b3efd6326b5 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, &PodDisruptionBudgetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, &Eviction{}, ) // Add the watch version that applies diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index bc5f970d270..1e6b075e32d 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -227,3 +228,373 @@ type Eviction struct { // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 +// +k8s:prerelease-lifecycle-gen:deprecated=1.21 +// +k8s:prerelease-lifecycle-gen:removed=1.25 + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +// Deprecated in 1.21. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. 
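// The register.go hunk above adds PodSecurityPolicy and PodSecurityPolicyList to the group's
// scheme builder, which is what lets scheme-backed codecs decode policy/v1beta1 PSP manifests.
// A minimal sketch of using that registration (illustrative only; assumes imports of
// k8s.io/apimachinery/pkg/runtime, k8s.io/apimachinery/pkg/runtime/serializer/json, fmt, and
// this package aliased as policyv1beta1):
func decodePSP(manifestYAML []byte) (*policyv1beta1.PodSecurityPolicy, error) {
	scheme := runtime.NewScheme()
	if err := policyv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	codec := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true})
	obj, _, err := codec.Decode(manifestYAML, nil, nil)
	if err != nil {
		return nil, err
	}
	psp, ok := obj.(*policyv1beta1.PodSecurityPolicy)
	if !ok {
		return nil, fmt.Errorf("manifest is not a PodSecurityPolicy")
	}
	return psp, nil
}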
+ // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // volumes is an allowlist of volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + // allowedHostPaths is an allowlist of host paths. 
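// AllowPrivilegeEscalation above is a *bool precisely so that "unset" can be told apart from
// "false"; per the field comment, the documented default when it is nil is true. A minimal
// sketch of reading it with that default (illustrative only):
func allowPrivilegeEscalation(spec PodSecurityPolicySpec) bool {
	if spec.AllowPrivilegeEscalation == nil {
		return true // unspecified defaults to true, per the field comment
	}
	return *spec.AllowPrivilegeEscalation
}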
Empty indicates + // that all host paths may be used. + // +optional + AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` + // AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. + // +optional + AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +type AllowedHostPath struct { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. 
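// The pathPrefix examples above (`/foo` allows `/foo`, `/foo/` and `/foo/bar` but not `/food`)
// boil down to a segment-aware prefix match after trimming trailing slashes. A minimal sketch
// of that comparison (illustrative only; assumes `import "strings"`; the authoritative logic
// lived in the PSP admission plugin):
func hostPathAllowed(allowed AllowedHostPath, hostPath string) bool {
	prefix := strings.TrimSuffix(allowed.PathPrefix, "/")
	path := strings.TrimSuffix(hostPath, "/")
	return path == prefix || strings.HasPrefix(path, prefix+"/")
}

// hostPathAllowed(AllowedHostPath{PathPrefix: "/foo"}, "/foo/bar") == true
// hostPathAllowed(AllowedHostPath{PathPrefix: "/foo"}, "/food")    == false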
+ // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities +// field and means that any capabilities are allowed to be requested. +var AllowAllCapabilities v1.Capability = "*" + +// FSType gives strong typing to different file systems that are used by volumes. +type FSType string + +const ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + CSI FSType = "csi" + Ephemeral FSType = "ephemeral" + All FSType = "*" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +type SELinuxStrategy string + +const ( + // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. 
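// The FSType constants above back the spec.volumes allowlist: an empty list allows no volume
// plugins, and the special All ("*") entry allows every plugin. A minimal sketch of that check
// (illustrative only; assumes it sits alongside the types in this package):
func volumeAllowed(spec PodSecurityPolicySpec, fsType FSType) bool {
	for _, v := range spec.Volumes {
		if v == All || v == fsType {
			return true
		}
	}
	return false
}

// volumeAllowed(spec, ConfigMap) is true when spec.Volumes contains "configMap" or "*",
// and false when spec.Volumes is empty.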
+type RunAsUserStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +type IDRange struct { + // min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +type RunAsUserStrategy string + +const ( + // RunAsUserStrategyMustRunAs means that container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // RunAsUserStrategyRunAsAny means that container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. + RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsUserStrategyRunAsAny means that container may make requests for any gid. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. 
+ // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. + // However, when FSGroups are specified, they have to fall in the defined range. + FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" + // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when gids are specified, they have to fall in the defined range. + SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" + // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. 
+const AllowAllRuntimeClassNames = "*" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.10 +// +k8s:prerelease-lifecycle-gen:deprecated=1.21 +// +k8s:prerelease-lifecycle-gen:removed=1.25 + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 4a79d759495..266a9a853a8 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,6 +27,34 @@ package v1beta1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_AllowedHostPath = map[string]string{ + "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", + "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", + "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", +} + +func (AllowedHostPath) SwaggerDoc() map[string]string { + return map_AllowedHostPath +} + var map_Eviction = map[string]string{ "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", "metadata": "ObjectMeta describes the pod that is being evicted.", @@ -37,6 +65,36 @@ func (Eviction) SwaggerDoc() map[string]string { return map_Eviction } +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HostPortRange = map[string]string{ + "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + var map_PodDisruptionBudget = map[string]string{ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -85,4 +143,106 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { return map_PodDisruptionBudgetStatus } +var map_PodSecurityPolicy = map[string]string{ + "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated in 1.21.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "PodSecurityPolicySpec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", + "volumes": "volumes is an allowlist of volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "allowedHostPaths": "allowedHostPaths is an allowlist of host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "allowedFlexVolumes is an allowlist of Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is an allowlist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to allowlist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "allowedProcMountTypes": "AllowedProcMountTypes is an allowlist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is an allowlist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. 
Required for MustRunAs.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 51895ffdb9b..8602d1adcad 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,11 +22,60 @@ limitations under the License. package v1beta1 import ( + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { + if in == nil { + return nil + } + out := new(AllowedHostPath) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in @@ -58,6 +107,59 @@ func (in *Eviction) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. +func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. 
+func (in *HostPortRange) DeepCopy() *HostPortRange { + if in == nil { + return nil + } + out := new(HostPortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { *out = *in @@ -184,3 +286,268 @@ func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. +func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { + if in == nil { + return nil + } + out := new(PodSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. +func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { + if in == nil { + return nil + } + out := new(PodSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + in.SELinux.DeepCopyInto(&out.SELinux) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]AllowedHostPath, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedProcMountTypes != nil { + in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes + *out = make([]corev1.ProcMountType, len(*in)) + copy(*out, *in) + } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. +func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
+func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. +func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. 
+func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go index 765a71e4728..612061d6cf7 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.prerelease-lifecycle.go @@ -90,3 +90,39 @@ func (in *PodDisruptionBudgetList) APILifecycleReplacement() schema.GroupVersion func (in *PodDisruptionBudgetList) APILifecycleRemoved() (major, minor int) { return 1, 25 } + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodSecurityPolicy) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodSecurityPolicy) APILifecycleDeprecated() (major, minor int) { + return 1, 21 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) { + return 1, 25 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *PodSecurityPolicyList) APILifecycleIntroduced() (major, minor int) { + return 1, 10 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *PodSecurityPolicyList) APILifecycleDeprecated() (major, minor int) { + return 1, 21 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) { + return 1, 25 +} diff --git a/vendor/k8s.io/api/resource/v1alpha2/generated.proto b/vendor/k8s.io/api/resource/v1alpha2/generated.proto index f7748f9a1a2..02412398c43 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/generated.proto +++ b/vendor/k8s.io/api/resource/v1alpha2/generated.proto @@ -107,7 +107,7 @@ message PodSchedulingContextSpec { // that suits all pending resources. This may get increased in the // future, but not reduced. 
// - // +listType=atomic + // +listType=set // +optional repeated string potentialNodes = 2; } @@ -208,7 +208,7 @@ message ResourceClaimSchedulingStatus { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=atomic + // +listType=set // +optional repeated string unsuitableNodes = 2; } diff --git a/vendor/k8s.io/api/resource/v1alpha2/types.go b/vendor/k8s.io/api/resource/v1alpha2/types.go index a614ff9dc1b..21936bfe3d1 100644 --- a/vendor/k8s.io/api/resource/v1alpha2/types.go +++ b/vendor/k8s.io/api/resource/v1alpha2/types.go @@ -248,7 +248,7 @@ type PodSchedulingContextSpec struct { // that suits all pending resources. This may get increased in the // future, but not reduced. // - // +listType=atomic + // +listType=set // +optional PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` } @@ -283,7 +283,7 @@ type ResourceClaimSchedulingStatus struct { // PodSchedulingSpec.PotentialNodes. This may get increased in the // future, but not reduced. // - // +listType=atomic + // +listType=set // +optional UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index b35f708c663..5f8eccaefc5 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -88,7 +88,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeContext will be passed if podInfoOnMount is set to true. + // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 7d7b7664b89..c785f368efd 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -291,7 +291,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeContext will be passed if podInfoOnMount is set to true. + // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 69ee6836107..c92a7f95a29 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. 
With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 2b15ec3feb0..1f3f380108a 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -243,66 +243,10 @@ func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo -func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} } -func (*VolumeAttributesClass) ProtoMessage() {} -func (*VolumeAttributesClass) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{7} -} -func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttributesClass.Merge(m, src) -} -func (m *VolumeAttributesClass) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttributesClass) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo - -func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} } -func (*VolumeAttributesClassList) ProtoMessage() {} -func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{8} -} -func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttributesClassList.Merge(m, src) -} -func (m *VolumeAttributesClassList) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttributesClassList) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo - func (m *VolumeError) Reset() { *m = VolumeError{} } func (*VolumeError) ProtoMessage() {} func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{9} + return fileDescriptor_10f856db1e670dc4, []int{7} } func (m *VolumeError) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,9 +280,6 @@ func init() { proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") - proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry") - proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList") proto.RegisterType((*VolumeError)(nil), 
"k8s.io.api.storage.v1alpha1.VolumeError") } @@ -347,71 +288,65 @@ func init() { } var fileDescriptor_10f856db1e670dc4 = []byte{ - // 1023 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcb, 0x6e, 0x23, 0x45, - 0x17, 0x4e, 0xe7, 0x32, 0xe3, 0xa9, 0xe4, 0xff, 0xc7, 0x53, 0xca, 0x0c, 0xc6, 0x23, 0xb5, 0x23, - 0xaf, 0x0c, 0x62, 0xba, 0x49, 0x40, 0x68, 0x84, 0xc4, 0xc2, 0x9d, 0x64, 0x11, 0x91, 0x84, 0xa1, - 0x1c, 0x01, 0x02, 0x16, 0x94, 0xdb, 0x07, 0xbb, 0x62, 0xf7, 0x45, 0x55, 0xd5, 0x16, 0x66, 0xc5, - 0x8a, 0x35, 0x3b, 0xde, 0x80, 0x67, 0xc9, 0x02, 0x89, 0xd1, 0xac, 0x66, 0x65, 0x91, 0x86, 0x67, - 0x60, 0xc1, 0x06, 0xd4, 0xd5, 0xe5, 0x76, 0xc7, 0x6d, 0x07, 0x27, 0x8b, 0xec, 0x5c, 0xe7, 0xf2, - 0x9d, 0xdb, 0x77, 0x4e, 0x27, 0xe8, 0xa0, 0xff, 0x5c, 0x58, 0x2c, 0xb0, 0xfb, 0x51, 0x1b, 0xb8, - 0x0f, 0x12, 0x84, 0x3d, 0x04, 0xbf, 0x13, 0x70, 0x5b, 0x2b, 0x68, 0xc8, 0x6c, 0x21, 0x03, 0x4e, - 0xbb, 0x60, 0x0f, 0x77, 0xe9, 0x20, 0xec, 0xd1, 0x5d, 0xbb, 0x0b, 0x3e, 0x70, 0x2a, 0xa1, 0x63, - 0x85, 0x3c, 0x90, 0x01, 0x7e, 0x9a, 0x1a, 0x5b, 0x34, 0x64, 0x96, 0x36, 0xb6, 0x26, 0xc6, 0xd5, - 0x67, 0x5d, 0x26, 0x7b, 0x51, 0xdb, 0x72, 0x03, 0xcf, 0xee, 0x06, 0xdd, 0xc0, 0x56, 0x3e, 0xed, - 0xe8, 0x5b, 0xf5, 0x52, 0x0f, 0xf5, 0x2b, 0xc5, 0xaa, 0xd6, 0x73, 0x81, 0xdd, 0x80, 0x27, 0x51, - 0x67, 0xe3, 0x55, 0xdf, 0x9f, 0xda, 0x78, 0xd4, 0xed, 0x31, 0x1f, 0xf8, 0xc8, 0x0e, 0xfb, 0x5d, - 0xe5, 0xc4, 0x41, 0x04, 0x11, 0x77, 0xe1, 0x46, 0x5e, 0xc2, 0xf6, 0x40, 0xd2, 0x79, 0xb1, 0xec, - 0x45, 0x5e, 0x3c, 0xf2, 0x25, 0xf3, 0x8a, 0x61, 0x3e, 0xf8, 0x2f, 0x07, 0xe1, 0xf6, 0xc0, 0xa3, - 0xb3, 0x7e, 0xf5, 0xbf, 0xd6, 0x10, 0xde, 0x6f, 0x1d, 0xb5, 0xd2, 0xfe, 0xed, 0xd3, 0x90, 0xba, - 0x4c, 0x8e, 0xf0, 0x37, 0xa8, 0x94, 0xa4, 0xd6, 0xa1, 0x92, 0x56, 0x8c, 0x1d, 0xa3, 0xb1, 0xb9, - 0xf7, 0xae, 0x35, 0x6d, 0x77, 0x16, 0xc1, 0x0a, 0xfb, 0xdd, 0x44, 0x20, 0xac, 0xc4, 0xda, 0x1a, - 0xee, 0x5a, 0x9f, 0xb4, 0xcf, 0xc1, 0x95, 0x27, 0x20, 0xa9, 0x83, 0x2f, 0xc6, 0xb5, 0x95, 0x78, - 0x5c, 0x43, 0x53, 0x19, 0xc9, 0x50, 0x31, 0x43, 0x5b, 0x7e, 0xd0, 0x81, 0xb3, 0x20, 0x0c, 0x06, - 0x41, 0x77, 0x54, 0x59, 0x55, 0x51, 0xde, 0x5b, 0x2e, 0xca, 0x31, 0x6d, 0xc3, 0xa0, 0x05, 0x03, - 0x70, 0x65, 0xc0, 0x9d, 0x72, 0x3c, 0xae, 0x6d, 0x9d, 0xe6, 0xc0, 0xc8, 0x15, 0x68, 0x7c, 0x80, - 0xca, 0x9a, 0x1f, 0xfb, 0x03, 0x2a, 0xc4, 0x29, 0xf5, 0xa0, 0xb2, 0xb6, 0x63, 0x34, 0x1e, 0x38, - 0x15, 0x9d, 0x62, 0xb9, 0x35, 0xa3, 0x27, 0x05, 0x0f, 0xfc, 0x05, 0x2a, 0xb9, 0xba, 0x3d, 0x95, - 0x75, 0x95, 0xac, 0x75, 0x5d, 0xb2, 0xd6, 0x84, 0x11, 0xd6, 0xa7, 0x11, 0xf5, 0x25, 0x93, 0x23, - 0x67, 0x2b, 0x1e, 0xd7, 0x4a, 0x93, 0x16, 0x93, 0x0c, 0x0d, 0x0b, 0xf4, 0xc8, 0xa3, 0xdf, 0x31, - 0x2f, 0xf2, 0x3e, 0x0b, 0x06, 0x91, 0x07, 0x2d, 0xf6, 0x3d, 0x54, 0x36, 0x6e, 0x15, 0xe2, 0x71, - 0x3c, 0xae, 0x3d, 0x3a, 0x99, 0x05, 0x23, 0x45, 0xfc, 0xfa, 0xaf, 0x06, 0x7a, 0x52, 0x1c, 0xfc, - 0x31, 0x13, 0x12, 0x7f, 0x5d, 0x18, 0xbe, 0xb5, 0xe4, 0x58, 0x98, 0x48, 0x47, 0x5f, 0xd6, 0x7d, - 0x2d, 0x4d, 0x24, 0xb9, 0xc1, 0x9f, 0xa1, 0x0d, 0x26, 0xc1, 0x13, 0x95, 0xd5, 0x9d, 0xb5, 0xc6, - 0xe6, 0x9e, 0x6d, 0x5d, 0xb3, 0xc6, 0x56, 0x31, 0x43, 0xe7, 0x7f, 0x1a, 0x7b, 0xe3, 0x28, 0x41, - 0x21, 0x29, 0x58, 0xfd, 0x97, 0x55, 0x54, 0x4e, 0xab, 0x6b, 0x4a, 0x49, 0xdd, 0x9e, 0x07, 0xbe, - 0xbc, 0x03, 0x16, 0xb7, 0xd0, 0xba, 0x08, 0xc1, 0xd5, 0xec, 0xdd, 0xbd, 0xb6, 0x96, 0xd9, 0xf4, - 0x5a, 0x21, 0xb8, 0xce, 0x96, 0x86, 0x5f, 0x4f, 0x5e, 0x44, 0x81, 0xe1, 0xaf, 0xd0, 0x3d, 0x21, - 0xa9, 0x8c, 0x84, 0x62, 0xe9, 0xd5, 0xa5, 
0x58, 0x02, 0x56, 0xb9, 0x3a, 0xff, 0xd7, 0xc0, 0xf7, - 0xd2, 0x37, 0xd1, 0x90, 0xf5, 0x0b, 0x03, 0x6d, 0xcf, 0xba, 0xdc, 0xc1, 0xd4, 0xc9, 0xd5, 0xa9, - 0x3f, 0xbb, 0x51, 0x49, 0x0b, 0x66, 0xfe, 0xca, 0x40, 0x4f, 0x0a, 0xd5, 0xab, 0x85, 0xc0, 0xc7, - 0x68, 0x3b, 0x04, 0x2e, 0x98, 0x90, 0xe0, 0xcb, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, - 0xae, 0x6d, 0xbf, 0x98, 0xa3, 0x27, 0x73, 0xbd, 0xf0, 0x39, 0x2a, 0x33, 0x7f, 0xc0, 0x7c, 0xd0, - 0xfb, 0x33, 0x9d, 0x78, 0x23, 0x5f, 0x47, 0xf2, 0xe1, 0x48, 0x1a, 0x32, 0x8b, 0xac, 0x06, 0xbd, - 0x9d, 0x9c, 0x99, 0xa3, 0x19, 0x14, 0x52, 0xc0, 0xad, 0xff, 0x36, 0x67, 0x3e, 0x89, 0x02, 0xbf, - 0x83, 0x4a, 0x54, 0x49, 0x80, 0xeb, 0x32, 0xb2, 0x7e, 0x37, 0xb5, 0x9c, 0x64, 0x16, 0x8a, 0x43, - 0xaa, 0x15, 0x73, 0x0e, 0xeb, 0x12, 0x1c, 0x52, 0xae, 0x39, 0x0e, 0xa9, 0x37, 0xd1, 0x90, 0x49, - 0x2a, 0xc9, 0x81, 0xcd, 0x1d, 0xd2, 0x2c, 0x95, 0x53, 0x2d, 0x27, 0x99, 0x45, 0xfd, 0x9f, 0xb5, - 0x39, 0x63, 0x52, 0x64, 0xcc, 0xd5, 0xd4, 0x51, 0x35, 0x95, 0x0a, 0x35, 0x75, 0xb2, 0x9a, 0x3a, - 0xf8, 0x67, 0x03, 0x61, 0x9a, 0x41, 0x9c, 0x4c, 0xc8, 0x9a, 0x32, 0xea, 0xe3, 0x5b, 0x2c, 0x89, - 0xd5, 0x2c, 0xa0, 0x1d, 0xfa, 0x92, 0x8f, 0x9c, 0xaa, 0xce, 0x02, 0x17, 0x0d, 0xc8, 0x9c, 0x14, - 0xf0, 0x39, 0xda, 0x4c, 0xa5, 0x87, 0x9c, 0x07, 0x5c, 0xaf, 0x6d, 0x63, 0x89, 0x8c, 0x94, 0xbd, - 0x63, 0xc6, 0xe3, 0xda, 0x66, 0x73, 0x0a, 0xf0, 0xf7, 0xb8, 0xb6, 0x99, 0xd3, 0x93, 0x3c, 0x78, - 0x12, 0xab, 0x03, 0xd3, 0x58, 0xeb, 0xb7, 0x89, 0x75, 0x00, 0x8b, 0x63, 0xe5, 0xc0, 0xab, 0x87, - 0xe8, 0x8d, 0x05, 0x2d, 0xc2, 0x65, 0xb4, 0xd6, 0x87, 0x51, 0xca, 0x44, 0x92, 0xfc, 0xc4, 0xdb, - 0x68, 0x63, 0x48, 0x07, 0x51, 0xca, 0xb8, 0x07, 0x24, 0x7d, 0x7c, 0xb8, 0xfa, 0xdc, 0xa8, 0xff, - 0xb9, 0x8a, 0x1e, 0x67, 0x13, 0xe0, 0xac, 0x1d, 0x49, 0x10, 0xea, 0xc3, 0x7a, 0x07, 0x17, 0x7a, - 0x0f, 0xa1, 0x0e, 0x67, 0x43, 0xe0, 0x8a, 0xad, 0x2a, 0xb5, 0xa9, 0xc7, 0x41, 0xa6, 0x21, 0x39, - 0x2b, 0x3c, 0x44, 0x28, 0xa4, 0x9c, 0x7a, 0x20, 0x81, 0x27, 0x47, 0x38, 0xe1, 0x97, 0xb3, 0x1c, - 0xbf, 0xf2, 0xd5, 0x59, 0x2f, 0x32, 0x90, 0x94, 0x56, 0x59, 0xdc, 0xa9, 0x82, 0xe4, 0x22, 0x55, - 0x3f, 0x42, 0x0f, 0x67, 0x5c, 0x6e, 0xd4, 0xe6, 0x57, 0x06, 0x7a, 0x73, 0x6e, 0x22, 0x77, 0x70, - 0xdf, 0x3f, 0xbf, 0x7a, 0xdf, 0xf7, 0x6e, 0xde, 0xad, 0x05, 0x47, 0xfe, 0x47, 0x03, 0xe5, 0xf9, - 0x89, 0x8f, 0xd1, 0x7a, 0xf2, 0xf7, 0xac, 0x2e, 0xe1, 0xed, 0xe5, 0x4a, 0x38, 0x63, 0x1e, 0x4c, - 0x3f, 0xb5, 0xc9, 0x8b, 0x28, 0x14, 0xfc, 0x16, 0xba, 0xef, 0x81, 0x10, 0xb4, 0x3b, 0xa1, 0xc6, - 0x43, 0x6d, 0x74, 0xff, 0x24, 0x15, 0x93, 0x89, 0xde, 0x69, 0x5e, 0x5c, 0x9a, 0x2b, 0x2f, 0x2f, - 0xcd, 0x95, 0xd7, 0x97, 0xe6, 0xca, 0x0f, 0xb1, 0x69, 0x5c, 0xc4, 0xa6, 0xf1, 0x32, 0x36, 0x8d, - 0xd7, 0xb1, 0x69, 0xfc, 0x1e, 0x9b, 0xc6, 0x4f, 0x7f, 0x98, 0x2b, 0x5f, 0x3e, 0xbd, 0xe6, 0x3f, - 0x98, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x2f, 0x75, 0xee, 0xf8, 0x0c, 0x00, 0x00, + // 925 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3f, 0x6f, 0x23, 0x45, + 0x14, 0xf7, 0xc6, 0xce, 0x9d, 0x6f, 0x1c, 0xc0, 0x37, 0x32, 0x87, 0xe5, 0x93, 0xd6, 0x91, 0x2b, + 0x83, 0xb8, 0x59, 0x72, 0x20, 0x74, 0xa2, 0xf3, 0x26, 0x29, 0x22, 0x92, 0x00, 0xe3, 0x08, 0x21, + 0xa0, 0x60, 0xbc, 0x7e, 0xd8, 0x13, 0x7b, 0xff, 0x68, 0x67, 0x36, 0xc2, 0x54, 0x54, 0xd4, 0x74, + 0x7c, 0x03, 0x3e, 0x4b, 0x0a, 0x24, 0x4e, 0x54, 0x57, 0x59, 0x64, 0xf9, 0x0e, 0x14, 0x34, 0xa0, + 0x9d, 0x1d, 0xaf, 0x37, 0x5e, 0x27, 0xe7, 0x4b, 0x71, 0x9d, 0xdf, 0x9b, 0xf7, 0x7e, 0xbf, 0xf7, + 0xdf, 0x8b, 0x0e, 0x26, 
0xcf, 0x04, 0xe1, 0xbe, 0x35, 0x89, 0x06, 0x10, 0x7a, 0x20, 0x41, 0x58, + 0x17, 0xe0, 0x0d, 0xfd, 0xd0, 0xd2, 0x0f, 0x2c, 0xe0, 0x96, 0x90, 0x7e, 0xc8, 0x46, 0x60, 0x5d, + 0xec, 0xb1, 0x69, 0x30, 0x66, 0x7b, 0xd6, 0x08, 0x3c, 0x08, 0x99, 0x84, 0x21, 0x09, 0x42, 0x5f, + 0xfa, 0xf8, 0x71, 0x6a, 0x4c, 0x58, 0xc0, 0x89, 0x36, 0x26, 0x0b, 0xe3, 0xd6, 0x93, 0x11, 0x97, + 0xe3, 0x68, 0x40, 0x1c, 0xdf, 0xb5, 0x46, 0xfe, 0xc8, 0xb7, 0x94, 0xcf, 0x20, 0xfa, 0x5e, 0x49, + 0x4a, 0x50, 0xbf, 0x52, 0xac, 0x56, 0x27, 0x47, 0xec, 0xf8, 0x61, 0xc2, 0xba, 0xca, 0xd7, 0xfa, + 0x68, 0x69, 0xe3, 0x32, 0x67, 0xcc, 0x3d, 0x08, 0x67, 0x56, 0x30, 0x19, 0x29, 0xa7, 0x10, 0x84, + 0x1f, 0x85, 0x0e, 0xbc, 0x92, 0x97, 0xb0, 0x5c, 0x90, 0x6c, 0x1d, 0x97, 0x75, 0x93, 0x57, 0x18, + 0x79, 0x92, 0xbb, 0x45, 0x9a, 0x8f, 0x5f, 0xe6, 0x20, 0x9c, 0x31, 0xb8, 0x6c, 0xd5, 0xaf, 0xf3, + 0x4f, 0x19, 0xe1, 0xfd, 0xfe, 0x51, 0x3f, 0xad, 0xdf, 0x3e, 0x0b, 0x98, 0xc3, 0xe5, 0x0c, 0x7f, + 0x87, 0xaa, 0x49, 0x68, 0x43, 0x26, 0x59, 0xd3, 0xd8, 0x35, 0xba, 0xb5, 0xa7, 0x1f, 0x90, 0x65, + 0xb9, 0x33, 0x06, 0x12, 0x4c, 0x46, 0x89, 0x42, 0x90, 0xc4, 0x9a, 0x5c, 0xec, 0x91, 0xcf, 0x06, + 0xe7, 0xe0, 0xc8, 0x13, 0x90, 0xcc, 0xc6, 0x97, 0xf3, 0x76, 0x29, 0x9e, 0xb7, 0xd1, 0x52, 0x47, + 0x33, 0x54, 0xcc, 0xd1, 0x8e, 0xe7, 0x0f, 0xe1, 0xcc, 0x0f, 0xfc, 0xa9, 0x3f, 0x9a, 0x35, 0xb7, + 0x14, 0xcb, 0x87, 0x9b, 0xb1, 0x1c, 0xb3, 0x01, 0x4c, 0xfb, 0x30, 0x05, 0x47, 0xfa, 0xa1, 0x5d, + 0x8f, 0xe7, 0xed, 0x9d, 0xd3, 0x1c, 0x18, 0xbd, 0x06, 0x8d, 0x0f, 0x50, 0x5d, 0xcf, 0xc7, 0xfe, + 0x94, 0x09, 0x71, 0xca, 0x5c, 0x68, 0x96, 0x77, 0x8d, 0xee, 0x03, 0xbb, 0xa9, 0x43, 0xac, 0xf7, + 0x57, 0xde, 0x69, 0xc1, 0x03, 0x7f, 0x85, 0xaa, 0x8e, 0x2e, 0x4f, 0xb3, 0xa2, 0x82, 0x25, 0xb7, + 0x05, 0x4b, 0x16, 0x13, 0x41, 0xbe, 0x88, 0x98, 0x27, 0xb9, 0x9c, 0xd9, 0x3b, 0xf1, 0xbc, 0x5d, + 0x5d, 0x94, 0x98, 0x66, 0x68, 0x58, 0xa0, 0x87, 0x2e, 0xfb, 0x81, 0xbb, 0x91, 0xfb, 0xa5, 0x3f, + 0x8d, 0x5c, 0xe8, 0xf3, 0x1f, 0xa1, 0xb9, 0x7d, 0x27, 0x8a, 0xb7, 0xe3, 0x79, 0xfb, 0xe1, 0xc9, + 0x2a, 0x18, 0x2d, 0xe2, 0x77, 0x7e, 0x37, 0xd0, 0xa3, 0x62, 0xe3, 0x8f, 0xb9, 0x90, 0xf8, 0xdb, + 0x42, 0xf3, 0xc9, 0x86, 0x6d, 0xe1, 0x22, 0x6d, 0x7d, 0x5d, 0xd7, 0xb5, 0xba, 0xd0, 0xe4, 0x1a, + 0x7f, 0x86, 0xb6, 0xb9, 0x04, 0x57, 0x34, 0xb7, 0x76, 0xcb, 0xdd, 0xda, 0x53, 0x8b, 0xdc, 0xb2, + 0xc6, 0xa4, 0x18, 0xa1, 0xfd, 0x86, 0xc6, 0xde, 0x3e, 0x4a, 0x50, 0x68, 0x0a, 0xd6, 0xf9, 0x6d, + 0x0b, 0xd5, 0xd3, 0xec, 0x7a, 0x52, 0x32, 0x67, 0xec, 0x82, 0x27, 0x5f, 0xc3, 0x14, 0xf7, 0x51, + 0x45, 0x04, 0xe0, 0xe8, 0xe9, 0xdd, 0xbb, 0x35, 0x97, 0xd5, 0xf0, 0xfa, 0x01, 0x38, 0xf6, 0x8e, + 0x86, 0xaf, 0x24, 0x12, 0x55, 0x60, 0xf8, 0x1b, 0x74, 0x4f, 0x48, 0x26, 0x23, 0xa1, 0xa6, 0xf4, + 0xfa, 0x52, 0x6c, 0x00, 0xab, 0x5c, 0xed, 0x37, 0x35, 0xf0, 0xbd, 0x54, 0xa6, 0x1a, 0xb2, 0x73, + 0x69, 0xa0, 0xc6, 0xaa, 0xcb, 0x6b, 0xe8, 0x3a, 0xbd, 0xde, 0xf5, 0x27, 0xaf, 0x94, 0xd2, 0x0d, + 0x3d, 0xff, 0xd3, 0x40, 0x8f, 0x0a, 0xd9, 0xab, 0x85, 0xc0, 0xc7, 0xa8, 0x11, 0x40, 0x28, 0xb8, + 0x90, 0xe0, 0xc9, 0xd4, 0x46, 0xad, 0xbd, 0x91, 0xae, 0x7d, 0x3c, 0x6f, 0x37, 0x3e, 0x5f, 0xf3, + 0x4e, 0xd7, 0x7a, 0xe1, 0x73, 0x54, 0xe7, 0xde, 0x94, 0x7b, 0xa0, 0xf7, 0x67, 0xd9, 0xf1, 0x6e, + 0x3e, 0x8f, 0xe4, 0x8f, 0x23, 0x29, 0xc8, 0x2a, 0xb2, 0x6a, 0x74, 0x23, 0x39, 0x33, 0x47, 0x2b, + 0x28, 0xb4, 0x80, 0xdb, 0xf9, 0x63, 0x4d, 0x7f, 0x92, 0x07, 0xfc, 0x3e, 0xaa, 0x32, 0xa5, 0x81, + 0x50, 0xa7, 0x91, 0xd5, 0xbb, 0xa7, 0xf5, 0x34, 0xb3, 0x50, 0x33, 0xa4, 0x4a, 0xb1, 0xe6, 0xb0, + 0x6e, 0x30, 0x43, 0xca, 0x35, 0x37, 0x43, 0x4a, 
0xa6, 0x1a, 0x32, 0x09, 0x25, 0x39, 0xb0, 0xb9, + 0x43, 0x9a, 0x85, 0x72, 0xaa, 0xf5, 0x34, 0xb3, 0xe8, 0xfc, 0x57, 0x5e, 0xd3, 0x26, 0x35, 0x8c, + 0xb9, 0x9c, 0x86, 0x2a, 0xa7, 0x6a, 0x21, 0xa7, 0x61, 0x96, 0xd3, 0x10, 0xff, 0x6a, 0x20, 0xcc, + 0x32, 0x88, 0x93, 0xc5, 0xb0, 0xa6, 0x13, 0xf5, 0xe9, 0x1d, 0x96, 0x84, 0xf4, 0x0a, 0x68, 0x87, + 0x9e, 0x0c, 0x67, 0x76, 0x4b, 0x47, 0x81, 0x8b, 0x06, 0x74, 0x4d, 0x08, 0xf8, 0x1c, 0xd5, 0x52, + 0xed, 0x61, 0x18, 0xfa, 0xa1, 0x5e, 0xdb, 0xee, 0x06, 0x11, 0x29, 0x7b, 0xdb, 0x8c, 0xe7, 0xed, + 0x5a, 0x6f, 0x09, 0xf0, 0xef, 0xbc, 0x5d, 0xcb, 0xbd, 0xd3, 0x3c, 0x78, 0xc2, 0x35, 0x84, 0x25, + 0x57, 0xe5, 0x2e, 0x5c, 0x07, 0x70, 0x33, 0x57, 0x0e, 0xbc, 0x75, 0x88, 0xde, 0xb9, 0xa1, 0x44, + 0xb8, 0x8e, 0xca, 0x13, 0x98, 0xa5, 0x93, 0x48, 0x93, 0x9f, 0xb8, 0x81, 0xb6, 0x2f, 0xd8, 0x34, + 0x4a, 0x27, 0xee, 0x01, 0x4d, 0x85, 0x4f, 0xb6, 0x9e, 0x19, 0x9d, 0x9f, 0x0d, 0x94, 0xe7, 0xc0, + 0xc7, 0xa8, 0x92, 0x7c, 0x93, 0xe8, 0x33, 0xf3, 0xde, 0x66, 0x67, 0xe6, 0x8c, 0xbb, 0xb0, 0x3c, + 0x97, 0x89, 0x44, 0x15, 0x0a, 0x7e, 0x17, 0xdd, 0x77, 0x41, 0x08, 0x36, 0xd2, 0xcc, 0xf6, 0x5b, + 0xda, 0xe8, 0xfe, 0x49, 0xaa, 0xa6, 0x8b, 0x77, 0xbb, 0x77, 0x79, 0x65, 0x96, 0x9e, 0x5f, 0x99, + 0xa5, 0x17, 0x57, 0x66, 0xe9, 0xa7, 0xd8, 0x34, 0x2e, 0x63, 0xd3, 0x78, 0x1e, 0x9b, 0xc6, 0x8b, + 0xd8, 0x34, 0xfe, 0x8a, 0x4d, 0xe3, 0x97, 0xbf, 0xcd, 0xd2, 0xd7, 0x8f, 0x6f, 0xf9, 0x0a, 0xfd, + 0x3f, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x8d, 0x17, 0x01, 0xbc, 0x0a, 0x00, 0x00, } func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) { @@ -799,115 +734,6 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *VolumeAttributesClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttributesClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { - v := m.Parameters[string(keysForParameters[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForParameters[iNdEx]) - copy(dAtA[i:], keysForParameters[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.DriverName) - copy(dAtA[i:], m.DriverName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DriverName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeAttributesClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *VolumeAttributesClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttributesClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1089,44 +915,6 @@ func (m *VolumeAttachmentStatus) Size() (n int) { return n } -func (m *VolumeAttributesClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DriverName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *VolumeAttributesClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *VolumeError) Size() (n int) { if m == nil { return 0 @@ -1250,44 +1038,6 @@ func (this *VolumeAttachmentStatus) String() string { }, "") return s } -func (this *VolumeAttributesClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&VolumeAttributesClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DriverName:` + fmt.Sprintf("%v", this.DriverName) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttributesClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]VolumeAttributesClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttributesClass", "VolumeAttributesClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&VolumeAttributesClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} func (this *VolumeError) String() string { if this == nil { return "nil" @@ -2448,365 +2198,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } 
return nil } -func (m *VolumeAttributesClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttributesClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttributesClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DriverName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DriverName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - 
postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Parameters[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttributesClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttributesClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttributesClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, VolumeAttributesClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *VolumeError) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 49e522be53c..88250a0f013 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -216,46 +216,6 @@ message VolumeAttachmentStatus { optional VolumeError detachError = 4; } -// VolumeAttributesClass represents a specification of mutable volume attributes -// defined by the CSI driver. The class can be specified during dynamic provisioning -// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. -message VolumeAttributesClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Name of the CSI driver - // This field is immutable. - optional string driverName = 2; - - // parameters hold volume attributes defined by the CSI driver. These values - // are opaque to the Kubernetes and are passed directly to the CSI driver. - // The underlying storage provider supports changing these attributes on an - // existing volume, however the parameters field itself is immutable. To - // invoke a volume update, a new VolumeAttributesClass should be created with - // new parameters, and the PersistentVolumeClaim should be updated to reference - // the new VolumeAttributesClass. - // - // This field is required and must contain at least one key/value pair. - // The keys cannot be empty, and the maximum number of parameters is 512, with - // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, - // the target PersistentVolumeClaim will be set to an "Infeasible" state in the - // modifyVolumeStatus field. - map parameters = 3; -} - -// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. -message VolumeAttributesClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of VolumeAttributesClass objects. - repeated VolumeAttributesClass items = 2; -} - // VolumeError captures an error encountered during a volume operation. message VolumeError { // time represents the time the error was encountered. 
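The hunks above, together with the register.go and types.go changes that follow, drop the alpha VolumeAttributesClass API from the vendored k8s.io/api/storage/v1alpha1 package. A minimal Go sketch of what that means for callers of the vendored module is below; the scheme setup and the expected results are illustrative assumptions for this downgraded vendor tree, not code taken from this patch:

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Register only what the downgraded vendored package still provides.
	scheme := runtime.NewScheme()
	if err := storagev1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// VolumeAttachment and CSIStorageCapacity remain registered by addKnownTypes.
	attachment := schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttachment"}
	fmt.Println(scheme.Recognizes(attachment)) // expected: true

	// VolumeAttributesClass is no longer part of this vendored package, so the
	// scheme does not recognize the kind (and the Go type itself no longer exists).
	vac := schema.GroupVersionKind{Group: "storage.k8s.io", Version: "v1alpha1", Kind: "VolumeAttributesClass"}
	fmt.Println(scheme.Recognizes(vac)) // expected: false
}

Any code that still imported the removed type would fail to compile against this vendor tree, which is why the generated marshal, deepcopy, swagger, and prerelease-lifecycle helpers are removed in lockstep in the files that follow.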
diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go index a70f8e18634..779c858028c 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/register.go +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -45,8 +45,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachmentList{}, &CSIStorageCapacity{}, &CSIStorageCapacityList{}, - &VolumeAttributesClass{}, - &VolumeAttributesClassList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 5957e480749..59ef348a316 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -251,55 +251,3 @@ type CSIStorageCapacityList struct { // +listMapKey=name Items []CSIStorageCapacity `json:"items" protobuf:"bytes,2,rep,name=items"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:prerelease-lifecycle-gen:introduced=1.29 -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttributesClass represents a specification of mutable volume attributes -// defined by the CSI driver. The class can be specified during dynamic provisioning -// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning. -type VolumeAttributesClass struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Name of the CSI driver - // This field is immutable. - DriverName string `json:"driverName" protobuf:"bytes,2,opt,name=driverName"` - - // parameters hold volume attributes defined by the CSI driver. These values - // are opaque to the Kubernetes and are passed directly to the CSI driver. - // The underlying storage provider supports changing these attributes on an - // existing volume, however the parameters field itself is immutable. To - // invoke a volume update, a new VolumeAttributesClass should be created with - // new parameters, and the PersistentVolumeClaim should be updated to reference - // the new VolumeAttributesClass. - // - // This field is required and must contain at least one key/value pair. - // The keys cannot be empty, and the maximum number of parameters is 512, with - // a cumulative max size of 256K. If the CSI driver rejects invalid parameters, - // the target PersistentVolumeClaim will be set to an "Infeasible" state in the - // modifyVolumeStatus field. - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` -} - -// +k8s:prerelease-lifecycle-gen:introduced=1.29 -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttributesClassList is a collection of VolumeAttributesClass objects. -type VolumeAttributesClassList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of VolumeAttributesClass objects. 
- Items []VolumeAttributesClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index ac87dbdca34..ba6afbd5916 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -103,27 +103,6 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { return map_VolumeAttachmentStatus } -var map_VolumeAttributesClass = map[string]string{ - "": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "driverName": "Name of the CSI driver This field is immutable.", - "parameters": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", -} - -func (VolumeAttributesClass) SwaggerDoc() map[string]string { - return map_VolumeAttributesClass -} - -var map_VolumeAttributesClassList = map[string]string{ - "": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of VolumeAttributesClass objects.", -} - -func (VolumeAttributesClassList) SwaggerDoc() map[string]string { - return map_VolumeAttributesClassList -} - var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "time represents the time the error was encountered.", diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 942871f7889..d9bc94b2500 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -238,72 +238,6 @@ func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeAttributesClass) DeepCopyInto(out *VolumeAttributesClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClass. -func (in *VolumeAttributesClass) DeepCopy() *VolumeAttributesClass { - if in == nil { - return nil - } - out := new(VolumeAttributesClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttributesClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttributesClassList) DeepCopyInto(out *VolumeAttributesClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeAttributesClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttributesClassList. -func (in *VolumeAttributesClassList) DeepCopy() *VolumeAttributesClassList { - if in == nil { - return nil - } - out := new(VolumeAttributesClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttributesClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeError) DeepCopyInto(out *VolumeError) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go index c169e782ceb..41114c3c68f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -120,39 +120,3 @@ func (in *VolumeAttachmentList) APILifecycleReplacement() schema.GroupVersionKin func (in *VolumeAttachmentList) APILifecycleRemoved() (major, minor int) { return 1, 24 } - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *VolumeAttributesClass) APILifecycleIntroduced() (major, minor int) { - return 1, 29 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
-func (in *VolumeAttributesClass) APILifecycleDeprecated() (major, minor int) { - return 1, 32 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *VolumeAttributesClass) APILifecycleRemoved() (major, minor int) { - return 1, 35 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *VolumeAttributesClassList) APILifecycleIntroduced() (major, minor int) { - return 1, 29 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *VolumeAttributesClassList) APILifecycleDeprecated() (major, minor int) { - return 1, 32 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *VolumeAttributesClassList) APILifecycleRemoved() (major, minor int) { - return 1, 35 -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index b99fd39e48a..2b354dd4715 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -91,7 +91,7 @@ message CSIDriverSpec { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeContext will be passed if podInfoOnMount is set to true. + // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. // "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index 0f5ade3c138..4c39b49ccd8 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -311,7 +311,7 @@ type CSIDriverSpec struct { // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. // - // The following VolumeContext will be passed if podInfoOnMount is set to true. + // The following VolumeConext will be passed if podInfoOnMount is set to true. // This list might grow, but the prefix will be used. 
// "csi.storage.k8s.io/pod.name": pod.Name // "csi.storage.k8s.io/pod.namespace": pod.Namespace diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 6d9d233066a..0f2718b9c14 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -50,7 +50,7 @@ func (CSIDriverList) SwaggerDoc() map[string]string { var map_CSIDriverSpec = map[string]string{ "": "CSIDriverSpec is the specification of a CSIDriver.", "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", - "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "podInfoOnMount": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.", "storageCapacity": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes <= 1.22 and now is mutable.", "fsGroupPolicy": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index cbdf2eeb831..60c8209de02 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,15 +22,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true -// if the conditions are changed by this call. +// SetStatusCondition sets the corresponding condition in conditions to newCondition. // conditions must be non-nil. // 1. 
if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { if conditions == nil { - return false + return } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -38,7 +37,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return true + return } if existingCondition.Status != newCondition.Status { @@ -48,31 +47,18 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } - changed = true } - if existingCondition.Reason != newCondition.Reason { - existingCondition.Reason = newCondition.Reason - changed = true - } - if existingCondition.Message != newCondition.Message { - existingCondition.Message = newCondition.Message - changed = true - } - if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { - existingCondition.ObservedGeneration = newCondition.ObservedGeneration - changed = true - } - - return changed + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message + existingCondition.ObservedGeneration = newCondition.ObservedGeneration } -// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns -// true if it was present and got removed. +// RemoveStatusCondition removes the corresponding conditionType from conditions. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { if conditions == nil || len(*conditions) == 0 { - return false + return } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -81,10 +67,7 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } - removed = len(*conditions) != len(newConditions) *conditions = newConditions - - return removed } // FindStatusCondition finds the conditionType in conditions. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index 2eebec667d3..a8866a43e10 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,44 +203,6 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } -// Mul multiplies the provided b to the current amount, or -// returns false if overflow or underflow would result. 
-func (a *int64Amount) Mul(b int64) bool { - switch { - case a.value == 0: - return true - case b == 0: - a.value = 0 - a.scale = 0 - return true - case a.scale == 0: - c, ok := int64Multiply(a.value, b) - if !ok { - return false - } - a.value = c - case a.scale > 0: - c, ok := int64Multiply(a.value, b) - if !ok { - return false - } - if _, ok = positiveScaleInt64(c, a.scale); !ok { - return false - } - a.value = c - default: - c, ok := int64Multiply(a.value, b) - if !ok { - return false - } - if _, ok = negativeScaleInt64(c, -a.scale); !ok { - return false - } - a.value = c - } - return true -} - // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 69f1bc336d3..b47d554b3c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -592,16 +592,6 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } -// Mul multiplies the provided y to the current value. -// It will return false if the result is inexact. Otherwise, it will return true. -func (q *Quantity) Mul(y int64) bool { - q.s = "" - if q.d.Dec == nil && q.i.Mul(y) { - return true - } - return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() -} - // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. func (q *Quantity) Cmp(y Quantity) int { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 0f58d66c094..2e33283ef22 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) } } return strMap, true, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index f46a24cc6c3..7bd1a3a6a5b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -257,26 +257,3 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } - -type encoderWithAllocator struct { - encoder EncoderWithAllocator - memAllocator MemoryAllocator -} - -// NewEncoderWithAllocator returns a new encoder -func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { - return &encoderWithAllocator{ - encoder: e, - memAllocator: a, - } -} - -// Encode writes the provided object to the nested writer -func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { - return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) -} - -// Identifier returns identifier of this encoder. 
-func (e *encoderWithAllocator) Identifier() Identifier { - return e.encoder.Identifier() -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 971c46d496a..87b3fec3f2d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -134,3 +134,23 @@ func (e *encoder) Encode(obj runtime.Object) error { e.buf.Reset() return err } + +type encoderWithAllocator struct { + writer io.Writer + encoder runtime.EncoderWithAllocator + memAllocator runtime.MemoryAllocator +} + +// NewEncoderWithAllocator returns a new streaming encoder +func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder { + return &encoderWithAllocator{ + writer: w, + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj runtime.Object) error { + return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index ad486d580f0..1328dd61202 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,19 +136,6 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } -// RemoveAll removes all keys that match predicate. -func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { - c.lock.Lock() - defer c.lock.Unlock() - - for key, element := range c.entries { - if predicate(key) { - c.evictionList.Remove(element) - delete(c.entries, key) - } - } -} - // Keys returns all unexpired keys in the cache. // // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go index a32fce5a0c1..32f075782a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -17,7 +17,6 @@ limitations under the License. package httpstream import ( - "errors" "fmt" "io" "net/http" @@ -96,26 +95,6 @@ type Stream interface { Identifier() uint32 } -// UpgradeFailureError encapsulates the cause for why the streaming -// upgrade request failed. Implements error interface. -type UpgradeFailureError struct { - Cause error -} - -func (u *UpgradeFailureError) Error() string { - return fmt.Sprintf("unable to upgrade streaming request: %s", u.Cause) -} - -// IsUpgradeFailure returns true if the passed error is (or wrapped error contains) -// the UpgradeFailureError. 
-func IsUpgradeFailure(err error) bool { - if err == nil { - return false - } - var upgradeErr *UpgradeFailureError - return errors.As(err, &upgradeErr) -} - // IsUpgradeRequest returns true if the given request is a connection upgrade request func IsUpgradeRequest(req *http.Request) bool { for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index c78326fa3b5..7fe52ee568e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -38,7 +38,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/util/httpstream" utilnet "k8s.io/apimachinery/pkg/util/net" - apiproxy "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apimachinery/third_party/forked/golang/netutil" ) @@ -69,10 +68,6 @@ type SpdyRoundTripper struct { // pingPeriod is a period for sending Ping frames over established // connections. pingPeriod time.Duration - - // upgradeTransport is an optional substitute for dialing if present. This field is - // mutually exclusive with the "tlsConfig", "Dialer", and "proxier". - upgradeTransport http.RoundTripper } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -81,61 +76,43 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use the specified // tlsConfig. -func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) { +func NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, - UpgradeTransport: nil, + TLS: tlsConfig, }) } // NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the // specified tlsConfig and proxy func. -func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) { +func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper { return NewRoundTripperWithConfig(RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxier, - UpgradeTransport: nil, + TLS: tlsConfig, + Proxier: proxier, }) } // NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified -// configuration. Returns an error if the SpdyRoundTripper is misconfigured. -func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) { - // Process UpgradeTransport, which is mutually exclusive to TLSConfig and Proxier. - if cfg.UpgradeTransport != nil { - if cfg.TLS != nil || cfg.Proxier != nil { - return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive to TLSConfig or Proxier") - } - tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport) - if err != nil { - return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err) - } - cfg.TLS = tlsConfig - } +// configuration. 
+func NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper { if cfg.Proxier == nil { cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) } return &SpdyRoundTripper{ - tlsConfig: cfg.TLS, - proxier: cfg.Proxier, - pingPeriod: cfg.PingPeriod, - upgradeTransport: cfg.UpgradeTransport, - }, nil + tlsConfig: cfg.TLS, + proxier: cfg.Proxier, + pingPeriod: cfg.PingPeriod, + } } // RoundTripperConfig is a set of options for an SpdyRoundTripper. type RoundTripperConfig struct { - // TLS configuration used by the round tripper if UpgradeTransport not present. + // TLS configuration used by the round tripper. TLS *tls.Config // Proxier is a proxy function invoked on each request. Optional. Proxier func(*http.Request) (*url.URL, error) // PingPeriod is a period for sending SPDY Pings on the connection. // Optional. PingPeriod time.Duration - // UpgradeTransport is a subtitute transport used for dialing. If set, - // this field will be used instead of "TLS" and "Proxier" for connection creation. - // Optional. - UpgradeTransport http.RoundTripper } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -146,13 +123,7 @@ func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { // Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { - var conn net.Conn - var err error - if s.upgradeTransport != nil { - conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport) - } else { - conn, err = s.dial(req) - } + conn, err := s.dial(req) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go index 8a741936a3d..09f54a49c74 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go @@ -21,19 +21,16 @@ import ( "fmt" "io" "net/http" + "regexp" "strings" "time" "golang.org/x/net/websocket" + "k8s.io/klog/v2" - "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog/v2" ) -const WebSocketProtocolHeader = "Sec-Websocket-Protocol" - // The Websocket subprotocol "channel.k8s.io" prepends each binary message with a byte indicating // the channel number (zero indexed) the message was sent on. Messages in both directions should // prefix their messages with this channel byte. When used for remote execution, the channel numbers @@ -80,30 +77,18 @@ const ( ReadWriteChannel ) +var ( + // connectionUpgradeRegex matches any Connection header value that includes upgrade + connectionUpgradeRegex = regexp.MustCompile("(^|.*,\\s*)upgrade($|\\s*,)") +) + // IsWebSocketRequest returns true if the incoming request contains connection upgrade headers // for WebSockets. func IsWebSocketRequest(req *http.Request) bool { if !strings.EqualFold(req.Header.Get("Upgrade"), "websocket") { return false } - return httpstream.IsUpgradeRequest(req) -} - -// IsWebSocketRequestWithStreamCloseProtocol returns true if the request contains headers -// identifying that it is requesting a websocket upgrade with a remotecommand protocol -// version that supports the "CLOSE" signal; false otherwise. 
-func IsWebSocketRequestWithStreamCloseProtocol(req *http.Request) bool { - if !IsWebSocketRequest(req) { - return false - } - requestedProtocols := strings.TrimSpace(req.Header.Get(WebSocketProtocolHeader)) - for _, requestedProtocol := range strings.Split(requestedProtocols, ",") { - if protocolSupportsStreamClose(strings.TrimSpace(requestedProtocol)) { - return true - } - } - - return false + return connectionUpgradeRegex.MatchString(strings.ToLower(req.Header.Get("Connection"))) } // IgnoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the @@ -187,46 +172,15 @@ func (conn *Conn) SetIdleTimeout(duration time.Duration) { conn.timeout = duration } -// SetWriteDeadline sets a timeout on writing to the websocket connection. The -// passed "duration" identifies how far into the future the write must complete -// by before the timeout fires. -func (conn *Conn) SetWriteDeadline(duration time.Duration) { - conn.ws.SetWriteDeadline(time.Now().Add(duration)) //nolint:errcheck -} - // Open the connection and create channels for reading and writing. It returns // the selected subprotocol, a slice of channels and an error. func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) (string, []io.ReadWriteCloser, error) { - // serveHTTPComplete is channel that is closed/selected when "websocket#ServeHTTP" finishes. - serveHTTPComplete := make(chan struct{}) - // Ensure panic in spawned goroutine is propagated into the parent goroutine. - panicChan := make(chan any, 1) go func() { - // If websocket server returns, propagate panic if necessary. Otherwise, - // signal HTTPServe finished by closing "serveHTTPComplete". - defer func() { - if p := recover(); p != nil { - panicChan <- p - } else { - close(serveHTTPComplete) - } - }() + defer runtime.HandleCrash() + defer conn.Close() websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req) }() - - // In normal circumstances, "websocket.Server#ServeHTTP" calls "initialize" which closes - // "conn.ready" and then blocks until serving is complete. - select { - case <-conn.ready: - klog.V(8).Infof("websocket server initialized--serving") - case <-serveHTTPComplete: - // websocket server returned before completing initialization; cleanup and return error. - conn.closeNonThreadSafe() //nolint:errcheck - return "", nil, fmt.Errorf("websocket server finished before becoming ready") - case p := <-panicChan: - panic(p) - } - + <-conn.ready rwc := make([]io.ReadWriteCloser, len(conn.channels)) for i := range conn.channels { rwc[i] = conn.channels[i] @@ -275,37 +229,20 @@ func (conn *Conn) resetTimeout() { } } -// closeNonThreadSafe cleans up by closing streams and the websocket -// connection *without* waiting for the "ready" channel. -func (conn *Conn) closeNonThreadSafe() error { - for _, s := range conn.channels { - s.Close() - } - var err error - if conn.ws != nil { - err = conn.ws.Close() - } - return err -} - // Close is only valid after Open has been called func (conn *Conn) Close() error { <-conn.ready - return conn.closeNonThreadSafe() -} - -// protocolSupportsStreamClose returns true if the passed protocol -// supports the stream close signal (currently only V5 remotecommand); -// false otherwise. -func protocolSupportsStreamClose(protocol string) bool { - return protocol == remotecommand.StreamProtocolV5Name + for _, s := range conn.channels { + s.Close() + } + conn.ws.Close() + return nil } // handle implements a websocket handler. 
func (conn *Conn) handle(ws *websocket.Conn) { - conn.initialize(ws) defer conn.Close() - supportsStreamClose := protocolSupportsStreamClose(conn.selectedProtocol) + conn.initialize(ws) for { conn.resetTimeout() @@ -319,21 +256,6 @@ func (conn *Conn) handle(ws *websocket.Conn) { if len(data) == 0 { continue } - if supportsStreamClose && data[0] == remotecommand.StreamClose { - if len(data) != 2 { - klog.Errorf("Single channel byte should follow stream close signal. Got %d bytes", len(data)-1) - break - } else { - channel := data[1] - if int(channel) >= len(conn.channels) { - klog.Errorf("Close is targeted for a channel %d that is not valid, possible protocol error", channel) - break - } - klog.V(4).Infof("Received half-close signal from client; close %d stream", channel) - conn.channels[channel].Close() // After first Close, other closes are noop. - } - continue - } channel := data[0] if conn.codec == base64Codec { channel = channel - '0' @@ -344,7 +266,7 @@ func (conn *Conn) handle(ws *websocket.Conn) { continue } if _, err := conn.channels[channel].DataFromSocket(data); err != nil { - klog.Errorf("Unable to write frame (%d bytes) to %d: %v", len(data), channel, err) + klog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data)) continue } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go index 3dd6f828b70..a1aa1688bd9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go @@ -16,54 +16,6 @@ limitations under the License. // Package wsstream contains utilities for streaming content over WebSockets. // The Conn type allows callers to multiplex multiple read/write channels over -// a single websocket. -// -// "channel.k8s.io" -// -// The Websocket RemoteCommand subprotocol "channel.k8s.io" prepends each binary message with a -// byte indicating the channel number (zero indexed) the message was sent on. Messages in both -// directions should prefix their messages with this channel byte. Used for remote execution, -// the channel numbers are by convention defined to match the POSIX file-descriptors assigned -// to STDIN, STDOUT, and STDERR (0, 1, and 2). No other conversion is performed on the raw -// subprotocol - writes are sent as they are received by the server. -// -// Example client session: -// -// CONNECT http://server.com with subprotocol "channel.k8s.io" -// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) -// READ []byte{1, 10} # receive "\n" on channel 1 (STDOUT) -// CLOSE -// -// "v2.channel.k8s.io" -// -// The second Websocket subprotocol version "v2.channel.k8s.io" is the same as version 1, -// but it is the first "versioned" subprotocol. -// -// "v3.channel.k8s.io" -// -// The third version of the Websocket RemoteCommand subprotocol adds another channel -// for terminal resizing events. This channel is prepended with the byte '3', and it -// transmits two window sizes (encoding TerminalSize struct) with integers in the range -// (0,65536]. -// -// "v4.channel.k8s.io" -// -// The fourth version of the Websocket RemoteCommand subprotocol adds a channel for -// errors. This channel returns structured errors containing process exit codes. The -// error is "apierrors.StatusError{}". -// -// "v5.channel.k8s.io" -// -// The fifth version of the Websocket RemoteCommand subprotocol adds a CLOSE signal, -// which is sent as the first byte of the message. 
The second byte is the channel -// id. This CLOSE signal is handled by the websocket server by closing the stream, -// allowing the other streams to complete transmission if necessary, and gracefully -// shutdown the connection. -// -// Example client session: -// -// CONNECT http://server.com with subprotocol "v5.channel.k8s.io" -// WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) -// WRITE []byte{255, 0} # send CLOSE signal (STDIN) -// CLOSE +// a single websocket. The Reader type allows an io.Reader to be copied over +// a websocket channel as binary content. package wsstream // import "k8s.io/apimachinery/pkg/util/httpstream/wsstream" diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index f358c794d10..0ea88156bef 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -72,14 +72,14 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an int32 integer before +// Parse the given string and try to convert it to an integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.ParseInt(val, 10, 32) + i, err := strconv.Atoi(val) if err != nil { return FromString(val) } - return FromInt32(int32(i)) + return FromInt(i) } // UnmarshalJSON implements the json.Unmarshaller interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 786ad991c23..2112c9ab7e9 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/merge" - "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -96,11 +95,11 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } - newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) if err != nil { return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) } @@ -140,13 +139,11 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } - // Don't allow duplicates in the applied object. 
patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) if err != nil { return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) } - - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) if err != nil { return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go index c6449467cf8..1ac96d7f7bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -32,7 +32,7 @@ import ( // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. type TypeConverter interface { - ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) + ObjectToTyped(runtime.Object) (*typed.TypedValue, error) TypedToObject(*typed.TypedValue) (runtime.Object, error) } @@ -54,7 +54,7 @@ func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields return &typeConverter{parser: tr}, nil } -func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { +func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { gvk := obj.GetObjectKind().GroupVersionKind() t := c.parser[gvk] if t == nil { @@ -62,9 +62,9 @@ func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.Validati } switch o := obj.(type) { case *unstructured.Unstructured: - return t.FromUnstructured(o.UnstructuredContent(), opts...) + return t.FromUnstructured(o.UnstructuredContent()) default: - return t.FromStructured(obj, opts...) + return t.FromStructured(obj) } } @@ -84,12 +84,12 @@ func NewDeducedTypeConverter() TypeConverter { } // ObjectToTyped converts an object into a TypedValue with a "deduced type". -func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { switch o := obj.(type) { case *unstructured.Unstructured: - return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) default: - return typed.DeducedParseableType.FromStructured(obj, opts...) + return typed.DeducedParseableType.FromStructured(obj) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go deleted file mode 100644 index e5196d1ee83..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proxy - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/url" - - utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/third_party/forked/golang/netutil" - "k8s.io/klog/v2" -) - -// DialURL will dial the specified URL using the underlying dialer held by the passed -// RoundTripper. The primary use of this method is to support proxying upgradable connections. -// For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https. -// If you wish to ensure ALPN negotiates http2 then set NextProto=[]string{"http2"} in the -// TLSConfig of the http.Transport -func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) { - dialAddr := netutil.CanonicalAddr(url) - - dialer, err := utilnet.DialerFor(transport) - if err != nil { - klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err) - } - - switch url.Scheme { - case "http": - if dialer != nil { - return dialer(ctx, "tcp", dialAddr) - } - var d net.Dialer - return d.DialContext(ctx, "tcp", dialAddr) - case "https": - // Get the tls config from the transport if we recognize it - tlsConfig, err := utilnet.TLSClientConfig(transport) - if err != nil { - klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err) - } - - if dialer != nil { - // We have a dialer; use it to open the connection, then - // create a tls client using the connection. - netConn, err := dialer(ctx, "tcp", dialAddr) - if err != nil { - return nil, err - } - if tlsConfig == nil { - // tls.Client requires non-nil config - klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify") - // tls.Handshake() requires ServerName or InsecureSkipVerify - tlsConfig = &tls.Config{ - InsecureSkipVerify: true, - } - } else if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { - // tls.HandshakeContext() requires ServerName or InsecureSkipVerify - // infer the ServerName from the hostname we're connecting to. - inferredHost := dialAddr - if host, _, err := net.SplitHostPort(dialAddr); err == nil { - inferredHost = host - } - // Make a copy to avoid polluting the provided config - tlsConfigCopy := tlsConfig.Clone() - tlsConfigCopy.ServerName = inferredHost - tlsConfig = tlsConfigCopy - } - - // Since this method is primarily used within a "Connection: Upgrade" call we assume the caller is - // going to write HTTP/1.1 request to the wire. http2 should not be allowed in the TLSConfig.NextProtos, - // so we explicitly set that here. We only do this check if the TLSConfig support http/1.1. - if supportsHTTP11(tlsConfig.NextProtos) { - tlsConfig = tlsConfig.Clone() - tlsConfig.NextProtos = []string{"http/1.1"} - } - - tlsConn := tls.Client(netConn, tlsConfig) - if err := tlsConn.HandshakeContext(ctx); err != nil { - netConn.Close() - return nil, err - } - return tlsConn, nil - } else { - // Dial. 
- tlsDialer := tls.Dialer{ - Config: tlsConfig, - } - return tlsDialer.DialContext(ctx, "tcp", dialAddr) - } - default: - return nil, fmt.Errorf("unknown scheme: %s", url.Scheme) - } -} - -func supportsHTTP11(nextProtos []string) bool { - if len(nextProtos) == 0 { - return true - } - for _, proto := range nextProtos { - if proto == "http/1.1" { - return true - } - } - return false -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go deleted file mode 100644 index d14ecfad544..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package proxy provides transport and upgrade support for proxies. -package proxy // import "k8s.io/apimachinery/pkg/util/proxy" diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go deleted file mode 100644 index 5a2dd6e14c8..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proxy - -import ( - "bytes" - "compress/flate" - "compress/gzip" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strings" - - "golang.org/x/net/html" - "golang.org/x/net/html/atom" - "k8s.io/klog/v2" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/net" - "k8s.io/apimachinery/pkg/util/sets" -) - -// atomsToAttrs states which attributes of which tags require URL substitution. 
-// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html -// -// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 -var atomsToAttrs = map[atom.Atom]sets.String{ - atom.A: sets.NewString("href"), - atom.Applet: sets.NewString("codebase"), - atom.Area: sets.NewString("href"), - atom.Audio: sets.NewString("src"), - atom.Base: sets.NewString("href"), - atom.Blockquote: sets.NewString("cite"), - atom.Body: sets.NewString("background"), - atom.Button: sets.NewString("formaction"), - atom.Command: sets.NewString("icon"), - atom.Del: sets.NewString("cite"), - atom.Embed: sets.NewString("src"), - atom.Form: sets.NewString("action"), - atom.Frame: sets.NewString("longdesc", "src"), - atom.Head: sets.NewString("profile"), - atom.Html: sets.NewString("manifest"), - atom.Iframe: sets.NewString("longdesc", "src"), - atom.Img: sets.NewString("longdesc", "src", "usemap"), - atom.Input: sets.NewString("src", "usemap", "formaction"), - atom.Ins: sets.NewString("cite"), - atom.Link: sets.NewString("href"), - atom.Object: sets.NewString("classid", "codebase", "data", "usemap"), - atom.Q: sets.NewString("cite"), - atom.Script: sets.NewString("src"), - atom.Source: sets.NewString("src"), - atom.Video: sets.NewString("poster", "src"), - - // TODO: css URLs hidden in style elements. -} - -// Transport is a transport for text/html content that replaces URLs in html -// content with the prefix of the proxy server -type Transport struct { - Scheme string - Host string - PathPrepend string - - http.RoundTripper -} - -// RoundTrip implements the http.RoundTripper interface -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - // Add reverse proxy headers. - forwardedURI := path.Join(t.PathPrepend, req.URL.EscapedPath()) - if strings.HasSuffix(req.URL.Path, "/") { - forwardedURI = forwardedURI + "/" - } - req.Header.Set("X-Forwarded-Uri", forwardedURI) - if len(t.Host) > 0 { - req.Header.Set("X-Forwarded-Host", t.Host) - } - if len(t.Scheme) > 0 { - req.Header.Set("X-Forwarded-Proto", t.Scheme) - } - - rt := t.RoundTripper - if rt == nil { - rt = http.DefaultTransport - } - resp, err := rt.RoundTrip(req) - - if err != nil { - return nil, errors.NewServiceUnavailable(fmt.Sprintf("error trying to reach service: %v", err)) - } - - if redirect := resp.Header.Get("Location"); redirect != "" { - targetURL, err := url.Parse(redirect) - if err != nil { - return nil, errors.NewInternalError(fmt.Errorf("error trying to parse Location header: %v", err)) - } - resp.Header.Set("Location", t.rewriteURL(targetURL, req.URL, req.Host)) - return resp, nil - } - - cType := resp.Header.Get("Content-Type") - cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0]) - if cType != "text/html" { - // Do nothing, simply pass through - return resp, nil - } - - return t.rewriteResponse(req, resp) -} - -var _ = net.RoundTripperWrapper(&Transport{}) - -func (rt *Transport) WrappedRoundTripper() http.RoundTripper { - return rt.RoundTripper -} - -// rewriteURL rewrites a single URL to go through the proxy, if the URL refers -// to the same host as sourceURL, which is the page on which the target URL -// occurred, or if the URL matches the sourceRequestHost. -func (t *Transport) rewriteURL(url *url.URL, sourceURL *url.URL, sourceRequestHost string) string { - // Example: - // When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/), - // the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The - // sourceRequestHost (i.e. 
req.Host) is the Host header that specifies the host on which the - // URL is sought, which can be different from sourceURL.Host. For example, if user sends the - // request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/), - // sourceRequestHost is "localhost:8001". - // - // If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host - // or sourceRequestHost, we should not consider the returned URL to be a completely different host. - // It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the - // necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/). - isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost - isRelative := !strings.HasPrefix(url.Path, "/") - if isDifferentHost || isRelative { - return url.String() - } - - // Do not rewrite scheme and host if the Transport has empty scheme and host - // when targetURL already contains the sourceRequestHost - if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") { - url.Scheme = t.Scheme - url.Host = t.Host - } - - origPath := url.Path - // Do not rewrite URL if the sourceURL already contains the necessary prefix. - if strings.HasPrefix(url.Path, t.PathPrepend) { - return url.String() - } - url.Path = path.Join(t.PathPrepend, url.Path) - if strings.HasSuffix(origPath, "/") { - // Add back the trailing slash, which was stripped by path.Join(). - url.Path += "/" - } - - return url.String() -} - -// rewriteHTML scans the HTML for tags with url-valued attributes, and updates -// those values with the urlRewriter function. The updated HTML is output to the -// writer. -func rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(*url.URL) string) error { - // Note: This assumes the content is UTF-8. - tokenizer := html.NewTokenizer(reader) - - var err error - for err == nil { - tokenType := tokenizer.Next() - switch tokenType { - case html.ErrorToken: - err = tokenizer.Err() - case html.StartTagToken, html.SelfClosingTagToken: - token := tokenizer.Token() - if urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok { - for i, attr := range token.Attr { - if urlAttrs.Has(attr.Key) { - url, err := url.Parse(attr.Val) - if err != nil { - // Do not rewrite the URL if it isn't valid. It is intended not - // to error here to prevent the inability to understand the - // content of the body to cause a fatal error. - continue - } - token.Attr[i].Val = urlRewriter(url) - } - } - } - _, err = writer.Write([]byte(token.String())) - default: - _, err = writer.Write(tokenizer.Raw()) - } - } - if err != io.EOF { - return err - } - return nil -} - -// rewriteResponse modifies an HTML response by updating absolute links referring -// to the original host to instead refer to the proxy transport. 
-func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) { - origBody := resp.Body - defer origBody.Close() - - newContent := &bytes.Buffer{} - var reader io.Reader = origBody - var writer io.Writer = newContent - encoding := resp.Header.Get("Content-Encoding") - switch encoding { - case "gzip": - var err error - reader, err = gzip.NewReader(reader) - if err != nil { - return nil, fmt.Errorf("errorf making gzip reader: %v", err) - } - gzw := gzip.NewWriter(writer) - defer gzw.Close() - writer = gzw - case "deflate": - var err error - reader = flate.NewReader(reader) - flw, err := flate.NewWriter(writer, flate.BestCompression) - if err != nil { - return nil, fmt.Errorf("errorf making flate writer: %v", err) - } - defer func() { - flw.Close() - flw.Flush() - }() - writer = flw - case "": - // This is fine - default: - // Some encoding we don't understand-- don't try to parse this - klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding) - return resp, nil - } - - urlRewriter := func(targetUrl *url.URL) string { - return t.rewriteURL(targetUrl, req.URL, req.Host) - } - err := rewriteHTML(reader, writer, urlRewriter) - if err != nil { - klog.Errorf("Failed to rewrite URLs: %v", err) - return resp, err - } - - resp.Body = io.NopCloser(newContent) - // Update header node with new content-length - // TODO: Remove any hash/signature headers here? - resp.Header.Del("Content-Length") - resp.ContentLength = int64(newContent.Len()) - - return resp, err -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go deleted file mode 100644 index 76acdfb4aca..00000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ /dev/null @@ -1,556 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package proxy - -import ( - "bufio" - "bytes" - "fmt" - "io" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/httpstream" - utilnet "k8s.io/apimachinery/pkg/util/net" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - "github.com/mxk/go-flowrate/flowrate" - "k8s.io/klog/v2" -) - -// UpgradeRequestRoundTripper provides an additional method to decorate a request -// with any authentication or other protocol level information prior to performing -// an upgrade on the server. Any response will be handled by the intercepting -// proxy. -type UpgradeRequestRoundTripper interface { - http.RoundTripper - // WrapRequest takes a valid HTTP request and returns a suitably altered version - // of request with any HTTP level values required to complete the request half of - // an upgrade on the server. It does not get a chance to see the response and - // should bypass any request side logic that expects to see the response. 
- WrapRequest(*http.Request) (*http.Request, error) -} - -// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade -type UpgradeAwareHandler struct { - // UpgradeRequired will reject non-upgrade connections if true. - UpgradeRequired bool - // Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server - // for upgrade requests unless UseRequestLocationOnUpgrade is true. - Location *url.URL - // AppendLocationPath determines if the original path of the Location should be appended to the upstream proxy request path - AppendLocationPath bool - // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used - Transport http.RoundTripper - // UpgradeTransport, if specified, will be used as the backend transport when upgrade requests are provided. - // This allows clients to disable HTTP/2. - UpgradeTransport UpgradeRequestRoundTripper - // WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting) - WrapTransport bool - // UseRequestLocation will use the incoming request URL when talking to the backend server. - UseRequestLocation bool - // UseLocationHost overrides the HTTP host header in requests to the backend server to use the Host from Location. - // This will override the req.Host field of a request, while UseRequestLocation will override the req.URL field - // of a request. The req.URL.Host specifies the server to connect to, while the req.Host field - // specifies the Host header value to send in the HTTP request. If this is false, the incoming req.Host header will - // just be forwarded to the backend server. - UseLocationHost bool - // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. - FlushInterval time.Duration - // MaxBytesPerSec controls the maximum rate for an upstream connection. No rate is imposed if the value is zero. - MaxBytesPerSec int64 - // Responder is passed errors that occur while setting up proxying. - Responder ErrorResponder - // Reject to forward redirect response - RejectForwardingRedirects bool -} - -const defaultFlushInterval = 200 * time.Millisecond - -// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular -// error format. -type ErrorResponder interface { - Error(w http.ResponseWriter, req *http.Request, err error) -} - -// SimpleErrorResponder is the legacy implementation of ErrorResponder for callers that only -// service a single request/response per proxy. -type SimpleErrorResponder interface { - Error(err error) -} - -func NewErrorResponder(r SimpleErrorResponder) ErrorResponder { - return simpleResponder{r} -} - -type simpleResponder struct { - responder SimpleErrorResponder -} - -func (r simpleResponder) Error(w http.ResponseWriter, req *http.Request, err error) { - r.responder.Error(err) -} - -// upgradeRequestRoundTripper implements proxy.UpgradeRequestRoundTripper. -type upgradeRequestRoundTripper struct { - http.RoundTripper - upgrader http.RoundTripper -} - -var ( - _ UpgradeRequestRoundTripper = &upgradeRequestRoundTripper{} - _ utilnet.RoundTripperWrapper = &upgradeRequestRoundTripper{} -) - -// WrappedRoundTripper returns the round tripper that a caller would use. 
-func (rt *upgradeRequestRoundTripper) WrappedRoundTripper() http.RoundTripper { - return rt.RoundTripper -} - -// WriteToRequest calls the nested upgrader and then copies the returned request -// fields onto the passed request. -func (rt *upgradeRequestRoundTripper) WrapRequest(req *http.Request) (*http.Request, error) { - resp, err := rt.upgrader.RoundTrip(req) - if err != nil { - return nil, err - } - return resp.Request, nil -} - -// onewayRoundTripper captures the provided request - which is assumed to have -// been modified by other round trippers - and then returns a fake response. -type onewayRoundTripper struct{} - -// RoundTrip returns a simple 200 OK response that captures the provided request. -func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return &http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: io.NopCloser(&bytes.Buffer{}), - Request: req, - }, nil -} - -// MirrorRequest is a round tripper that can be called to get back the calling request as -// the core round tripper in a chain. -var MirrorRequest http.RoundTripper = onewayRoundTripper{} - -// NewUpgradeRequestRoundTripper takes two round trippers - one for the underlying TCP connection, and -// one that is able to write headers to an HTTP request. The request rt is used to set the request headers -// and that is written to the underlying connection rt. -func NewUpgradeRequestRoundTripper(connection, request http.RoundTripper) UpgradeRequestRoundTripper { - return &upgradeRequestRoundTripper{ - RoundTripper: connection, - upgrader: request, - } -} - -// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing -func normalizeLocation(location *url.URL) *url.URL { - normalized, _ := url.Parse(location.String()) - if len(normalized.Scheme) == 0 { - normalized.Scheme = "http" - } - return normalized -} - -// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning -// errors to the caller. -func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler { - return &UpgradeAwareHandler{ - Location: normalizeLocation(location), - Transport: transport, - WrapTransport: wrapTransport, - UpgradeRequired: upgradeRequired, - FlushInterval: defaultFlushInterval, - Responder: responder, - } -} - -func proxyRedirectsforRootPath(path string, w http.ResponseWriter, req *http.Request) bool { - redirect := false - method := req.Method - - // From pkg/genericapiserver/endpoints/handlers/proxy.go#ServeHTTP: - // Redirect requests with an empty path to a location that ends with a '/' - // This is essentially a hack for https://issue.k8s.io/4958. - // Note: Keep this code after tryUpgrade to not break that flow. - if len(path) == 0 && (method == http.MethodGet || method == http.MethodHead) { - var queryPart string - if len(req.URL.RawQuery) > 0 { - queryPart = "?" 
+ req.URL.RawQuery - } - w.Header().Set("Location", req.URL.Path+"/"+queryPart) - w.WriteHeader(http.StatusMovedPermanently) - redirect = true - } - return redirect -} - -// ServeHTTP handles the proxy request -func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if h.tryUpgrade(w, req) { - return - } - if h.UpgradeRequired { - h.Responder.Error(w, req, errors.NewBadRequest("Upgrade request required")) - return - } - - loc := *h.Location - loc.RawQuery = req.URL.RawQuery - - // If original request URL ended in '/', append a '/' at the end of the - // of the proxy URL - if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") { - loc.Path += "/" - } - - proxyRedirect := proxyRedirectsforRootPath(loc.Path, w, req) - if proxyRedirect { - return - } - - if h.Transport == nil || h.WrapTransport { - h.Transport = h.defaultProxyTransport(req.URL, h.Transport) - } - - // WithContext creates a shallow clone of the request with the same context. - newReq := req.WithContext(req.Context()) - newReq.Header = utilnet.CloneHeader(req.Header) - if !h.UseRequestLocation { - newReq.URL = &loc - } - if h.UseLocationHost { - // exchanging req.Host with the backend location is necessary for backends that act on the HTTP host header (e.g. API gateways), - // because req.Host has preference over req.URL.Host in filling this header field - newReq.Host = h.Location.Host - } - - // create the target location to use for the reverse proxy - reverseProxyLocation := &url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host} - if h.AppendLocationPath { - reverseProxyLocation.Path = h.Location.Path - } - - proxy := httputil.NewSingleHostReverseProxy(reverseProxyLocation) - proxy.Transport = h.Transport - proxy.FlushInterval = h.FlushInterval - proxy.ErrorLog = log.New(noSuppressPanicError{}, "", log.LstdFlags) - if h.RejectForwardingRedirects { - oldModifyResponse := proxy.ModifyResponse - proxy.ModifyResponse = func(response *http.Response) error { - code := response.StatusCode - if code >= 300 && code <= 399 && len(response.Header.Get("Location")) > 0 { - // close the original response - response.Body.Close() - msg := "the backend attempted to redirect this request, which is not permitted" - // replace the response - *response = http.Response{ - StatusCode: http.StatusBadGateway, - Status: fmt.Sprintf("%d %s", response.StatusCode, http.StatusText(response.StatusCode)), - Body: io.NopCloser(strings.NewReader(msg)), - ContentLength: int64(len(msg)), - } - } else { - if oldModifyResponse != nil { - if err := oldModifyResponse(response); err != nil { - return err - } - } - } - return nil - } - } - if h.Responder != nil { - // if an optional error interceptor/responder was provided wire it - // the custom responder might be used for providing a unified error reporting - // or supporting retry mechanisms by not sending non-fatal errors to the clients - proxy.ErrorHandler = h.Responder.Error - } - proxy.ServeHTTP(w, newReq) -} - -type noSuppressPanicError struct{} - -func (noSuppressPanicError) Write(p []byte) (n int, err error) { - // skip "suppressing panic for copyResponse error in test; copy error" error message - // that ends up in CI tests on each kube-apiserver termination as noise and - // everybody thinks this is fatal. - if strings.Contains(string(p), "suppressing panic") { - return len(p), nil - } - return os.Stderr.Write(p) -} - -// tryUpgrade returns true if the request was handled. 
-func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool { - if !httpstream.IsUpgradeRequest(req) { - klog.V(6).Infof("Request was not an upgrade") - return false - } - - var ( - backendConn net.Conn - rawResponse []byte - err error - ) - - location := *h.Location - if h.UseRequestLocation { - location = *req.URL - location.Scheme = h.Location.Scheme - location.Host = h.Location.Host - if h.AppendLocationPath { - location.Path = singleJoiningSlash(h.Location.Path, location.Path) - } - } - - clone := utilnet.CloneRequest(req) - // Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy - // handles this in the non-upgrade path. - utilnet.AppendForwardedForHeader(clone) - klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) - if h.UseLocationHost { - clone.Host = h.Location.Host - } - clone.URL = &location - backendConn, err = h.DialForUpgrade(clone) - if err != nil { - klog.V(6).Infof("Proxy connection error: %v", err) - h.Responder.Error(w, req, err) - return true - } - defer backendConn.Close() - - // determine the http response code from the backend by reading from rawResponse+backendConn - backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) - if err != nil { - klog.V(6).Infof("Proxy connection error: %v", err) - h.Responder.Error(w, req, err) - return true - } - if len(headerBytes) > len(rawResponse) { - // we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend - rawResponse = headerBytes - } - - // If the backend did not upgrade the request, return an error to the client. If the response was - // an error, the error is forwarded directly after the connection is hijacked. Otherwise, just - // return a generic error here. - if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 { - err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode) - klog.Errorf("Proxy upgrade error: %v", err) - h.Responder.Error(w, req, err) - return true - } - - // Once the connection is hijacked, the ErrorResponder will no longer work, so - // hijacking should be the last step in the upgrade. - requestHijacker, ok := w.(http.Hijacker) - if !ok { - klog.V(6).Infof("Unable to hijack response writer: %T", w) - h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) - return true - } - requestHijackedConn, _, err := requestHijacker.Hijack() - if err != nil { - klog.V(6).Infof("Unable to hijack response: %v", err) - h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) - return true - } - defer requestHijackedConn.Close() - - if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { - // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. 
- klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) - // set read/write deadlines - deadline := time.Now().Add(10 * time.Second) - backendConn.SetReadDeadline(deadline) - requestHijackedConn.SetWriteDeadline(deadline) - // write the response to the client - err := backendHTTPResponse.Write(requestHijackedConn) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - klog.Errorf("Error proxying data from backend to client: %v", err) - } - // Indicate we handled the request - return true - } - - // Forward raw response bytes back to client. - if len(rawResponse) > 0 { - klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) - if _, err = requestHijackedConn.Write(rawResponse); err != nil { - utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) - } - } - - // Proxy the connection. This is bidirectional, so we need a goroutine - // to copy in each direction. Once one side of the connection exits, we - // exit the function which performs cleanup and in the process closes - // the other half of the connection in the defer. - writerComplete := make(chan struct{}) - readerComplete := make(chan struct{}) - - go func() { - var writer io.WriteCloser - if h.MaxBytesPerSec > 0 { - writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec) - } else { - writer = backendConn - } - _, err := io.Copy(writer, requestHijackedConn) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - klog.Errorf("Error proxying data from client to backend: %v", err) - } - close(writerComplete) - }() - - go func() { - var reader io.ReadCloser - if h.MaxBytesPerSec > 0 { - reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec) - } else { - reader = backendConn - } - _, err := io.Copy(requestHijackedConn, reader) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - klog.Errorf("Error proxying data from backend to client: %v", err) - } - close(readerComplete) - }() - - // Wait for one half the connection to exit. Once it does the defer will - // clean up the other half of the connection. - select { - case <-writerComplete: - case <-readerComplete: - } - klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header) - - return true -} - -// FIXME: Taken from net/http/httputil/reverseproxy.go as singleJoiningSlash is not exported to be re-used. 
-// See-also: https://github.com/golang/go/issues/44290 -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} - -func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error) { - if h.UpgradeTransport == nil { - return dial(req, h.Transport) - } - updatedReq, err := h.UpgradeTransport.WrapRequest(req) - if err != nil { - return nil, err - } - return dial(updatedReq, h.UpgradeTransport) -} - -// getResponseCode reads a http response from the given reader, returns the response, -// the bytes read from the reader, and any error encountered -func getResponse(r io.Reader) (*http.Response, []byte, error) { - rawResponse := bytes.NewBuffer(make([]byte, 0, 256)) - // Save the bytes read while reading the response headers into the rawResponse buffer - resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil) - if err != nil { - return nil, nil, err - } - // return the http response and the raw bytes consumed from the reader in the process - return resp, rawResponse.Bytes(), nil -} - -// dial dials the backend at req.URL and writes req to it. -func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) { - conn, err := DialURL(req.Context(), req.URL, transport) - if err != nil { - return nil, fmt.Errorf("error dialing backend: %v", err) - } - - if err = req.Write(conn); err != nil { - conn.Close() - return nil, fmt.Errorf("error sending request: %v", err) - } - - return conn, err -} - -func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper { - scheme := url.Scheme - host := url.Host - suffix := h.Location.Path - if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") { - suffix += "/" - } - pathPrepend := strings.TrimSuffix(url.Path, suffix) - rewritingTransport := &Transport{ - Scheme: scheme, - Host: host, - PathPrepend: pathPrepend, - RoundTripper: internalTransport, - } - return &corsRemovingTransport{ - RoundTripper: rewritingTransport, - } -} - -// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers -// from the internal response. 
-// Implements pkg/util/net.RoundTripperWrapper -type corsRemovingTransport struct { - http.RoundTripper -} - -var _ = utilnet.RoundTripperWrapper(&corsRemovingTransport{}) - -func (rt *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) { - resp, err := rt.RoundTripper.RoundTrip(req) - if err != nil { - return nil, err - } - removeCORSHeaders(resp) - return resp, nil -} - -func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper { - return rt.RoundTripper -} - -// removeCORSHeaders strip CORS headers sent from the backend -// This should be called on all responses before returning -func removeCORSHeaders(resp *http.Response) { - resp.Header.Del("Access-Control-Allow-Credentials") - resp.Header.Del("Access-Control-Allow-Headers") - resp.Header.Del("Access-Control-Allow-Methods") - resp.Header.Del("Access-Control-Allow-Origin") -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go index ba153ee24fc..237ebaef486 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -46,22 +46,8 @@ const ( // adds support for exit codes. StreamProtocolV4Name = "v4.channel.k8s.io" - // The subprotocol "v5.channel.k8s.io" is used for remote command - // attachment/execution. It is the 5th version of the subprotocol and - // adds support for a CLOSE signal. - StreamProtocolV5Name = "v5.channel.k8s.io" - NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") ExitCodeCauseType = metav1.CauseType("ExitCode") - - // RemoteCommand stream identifiers. The first three identifiers (for STDIN, - // STDOUT, STDERR) are the same as their file descriptors. 
- StreamStdIn = 0 - StreamStdOut = 1 - StreamStdErr = 2 - StreamErr = 3 - StreamResize = 4 - StreamClose = 255 ) var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index 85b0cfc0728..df305b712c5 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,17 +20,12 @@ import ( "errors" "fmt" "reflect" - "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" - "k8s.io/kube-openapi/pkg/validation/spec" ) -const patchMergeKey = "x-kubernetes-patch-merge-key" -const patchStrategy = "x-kubernetes-patch-strategy" - type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -153,90 +148,6 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } -type PatchMetaFromOpenAPIV3 struct { - // SchemaList is required to resolve OpenAPI V3 references - SchemaList map[string]*spec.Schema - Schema *spec.Schema -} - -func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { - if s.Schema == nil { - return PatchMetaFromOpenAPIV3{}, nil - } - if len(s.Schema.Properties) == 0 { - return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) - } - subschema, ok := s.Schema.Properties[key] - if !ok { - return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) - } - return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil -} - -func resolve(l *PatchMetaFromOpenAPIV3) error { - if len(l.Schema.AllOf) > 0 { - l.Schema = &l.Schema.AllOf[0] - } - if refString := l.Schema.Ref.String(); refString != "" { - str := strings.TrimPrefix(refString, "#/components/schemas/") - sch, ok := l.SchemaList[str] - if ok { - l.Schema = sch - } else { - return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) - } - } - return nil -} - -func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { - l, err := s.traverse(key) - if err != nil { - return l, PatchMeta{}, err - } - p := PatchMeta{} - f, ok := l.Schema.Extensions[patchMergeKey] - if ok { - p.SetPatchMergeKey(f.(string)) - } - g, ok := l.Schema.Extensions[patchStrategy] - if ok { - p.SetPatchStrategies(strings.Split(g.(string), ",")) - } - - err = resolve(&l) - return l, p, err -} - -func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { - l, err := s.traverse(key) - if err != nil { - return l, PatchMeta{}, err - } - p := PatchMeta{} - f, ok := l.Schema.Extensions[patchMergeKey] - if ok { - p.SetPatchMergeKey(f.(string)) - } - g, ok := l.Schema.Extensions[patchStrategy] - if ok { - p.SetPatchStrategies(strings.Split(g.(string), ",")) - } - if l.Schema.Items != nil { - l.Schema = l.Schema.Items.Schema - } - err = resolve(&l) - return l, p, err -} - -func (s PatchMetaFromOpenAPIV3) Name() string { - schema := s.Schema - if len(schema.Type) > 0 { - return strings.Join(schema.Type, "") - } - return "Struct" -} - type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index bc387d01163..ae73bda9666 
100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). -func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { +func NotSupported(field *Path, value interface{}, validValues []string) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(fmt.Sprint(v)) + quotedValues[i] = strconv.Quote(v) } detail = "supported values: " + strings.Join(quotedValues, ", ") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 2292ba13765..4c619569533 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -18,7 +18,6 @@ package version import ( "bytes" - "errors" "fmt" "regexp" "strconv" @@ -86,47 +85,6 @@ func parse(str string, semver bool) (*Version, error) { return v, nil } -// HighestSupportedVersion returns the highest supported version -// This function assumes that the highest supported version must be v1.x. -func HighestSupportedVersion(versions []string) (*Version, error) { - if len(versions) == 0 { - return nil, errors.New("empty array for supported versions") - } - - var ( - highestSupportedVersion *Version - theErr error - ) - - for i := len(versions) - 1; i >= 0; i-- { - currentHighestVer, err := ParseGeneric(versions[i]) - if err != nil { - theErr = err - continue - } - - if currentHighestVer.Major() > 1 { - continue - } - - if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { - highestSupportedVersion = currentHighestVer - } - } - - if highestSupportedVersion == nil { - return nil, fmt.Errorf( - "could not find a highest supported version from versions (%v) reported: %+v", - versions, theErr) - } - - if highestSupportedVersion.Major() != 1 { - return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) - } - - return highestSupportedVersion, nil -} - // ParseGeneric parses a "generic" version string. 
The version string must consist of two // or more dot-separated numeric fields (the first of which can't have leading zeroes), // followed by arbitrary uninterpreted data (which need not be separated from the final diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 107bfc132fd..0dd13c626c8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -40,10 +40,6 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding var timeCh <-chan time.Time doneCh := ctx.Done() - if !sliding { - timeCh = t.C() - } - // if immediate is true the condition is // guaranteed to be executed at least once, // if we haven't requested immediate execution, delay once @@ -54,27 +50,17 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding }(); err != nil || ok { return err } - } - - if sliding { + } else { timeCh = t.C() - } - - for { - - // Wait for either the context to be cancelled or the next invocation be called select { case <-doneCh: return ctx.Err() case <-timeCh: } + } - // IMPORTANT: Because there is no channel priority selection in golang - // it is possible for very short timers to "win" the race in the previous select - // repeatedly even when the context has been canceled. We therefore must - // explicitly check for context cancellation on every loop and exit if true to - // guarantee that we don't invoke condition more than once after context has - // been cancelled. + for { + // checking ctx.Err() is slightly faster than checking a select if err := ctx.Err(); err != nil { return err } @@ -91,5 +77,21 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding if sliding { t.Next() } + + if timeCh == nil { + timeCh = t.C() + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and doneCh, and t.C select falls through. + // In order to mitigate we re-check doneCh at the beginning + // of every loop to guarantee at-most one extra execution + // of condition. + select { + case <-doneCh: + return ctx.Err() + case <-timeCh: + } } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/config.go b/vendor/k8s.io/apiserver/pkg/admission/config.go index c0b75a98315..43613321b94 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/config.go +++ b/vendor/k8s.io/apiserver/pkg/admission/config.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "os" "path" "path/filepath" @@ -59,7 +60,7 @@ func ReadAdmissionConfiguration(pluginNames []string, configFilePath string, con return configProvider{config: &apiserver.AdmissionConfiguration{}}, nil } // a file was provided, so we just read it. 
- data, err := os.ReadFile(configFilePath) + data, err := ioutil.ReadFile(configFilePath) if err != nil { return nil, fmt.Errorf("unable to read admission control configuration from %q [%v]", configFilePath, err) } @@ -140,7 +141,7 @@ func GetAdmissionPluginConfigurationFor(pluginCfg apiserver.AdmissionPluginConfi } // there is nothing nested, so we delegate to path if pluginCfg.Path != "" { - content, err := os.ReadFile(pluginCfg.Path) + content, err := ioutil.ReadFile(pluginCfg.Path) if err != nil { klog.Fatalf("Couldn't open admission plugin configuration %s: %#v", pluginCfg.Path, err) return nil, err diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go index b7b589d273a..25ee108ea95 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/compile.go @@ -141,7 +141,6 @@ type CompilationResult struct { Program cel.Program Error *apiservercel.Error ExpressionAccessor ExpressionAccessor - OutputType *cel.Type } // Compiler provides a CEL expression compiler configured with the desired admission related CEL variables and @@ -215,7 +214,6 @@ func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor, op return CompilationResult{ Program: prog, ExpressionAccessor: expressionAccessor, - OutputType: ast.OutputType(), } } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 2dbfa099164..3da5549442d 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -23,7 +23,6 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "github.com/google/cel-go/common/types/traits" v1 "k8s.io/api/admission/v1" corev1 "k8s.io/api/core/v1" @@ -70,8 +69,8 @@ func (c *CompositedCompiler) CompileAndStoreVariables(variables []NamedExpressio } func (c *CompositedCompiler) CompileAndStoreVariable(variable NamedExpressionAccessor, options OptionalVariableDeclarations, mode environment.Type) CompilationResult { + c.CompositionEnv.AddField(variable.GetName()) result := c.Compiler.CompileCELExpression(variable, options, mode) - c.CompositionEnv.AddField(variable.GetName(), result.OutputType) c.CompositionEnv.CompiledVariables[variable.GetName()] = result return result } @@ -91,8 +90,8 @@ type CompositionEnv struct { CompiledVariables map[string]CompilationResult } -func (c *CompositionEnv) AddField(name string, celType *cel.Type) { - c.MapType.Fields[name] = apiservercel.NewDeclField(name, convertCelTypeToDeclType(celType), true, nil, nil) +func (c *CompositionEnv) AddField(name string) { + c.MapType.Fields[name] = apiservercel.NewDeclField(name, apiservercel.DynType, true, nil, nil) } func NewCompositionEnv(typeName string, baseEnvSet *environment.EnvSet) (*CompositionEnv, error) { @@ -197,48 +196,3 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { } return v } - -// convertCelTypeToDeclType converts a cel.Type to DeclType, for the use of -// the TypeProvider and the cost estimator. -// List and map types are created on-demand with their parameters converted recursively. 
-func convertCelTypeToDeclType(celType *cel.Type) *apiservercel.DeclType { - if celType == nil { - return apiservercel.DynType - } - switch celType { - case cel.AnyType: - return apiservercel.AnyType - case cel.BoolType: - return apiservercel.BoolType - case cel.BytesType: - return apiservercel.BytesType - case cel.DoubleType: - return apiservercel.DoubleType - case cel.DurationType: - return apiservercel.DurationType - case cel.IntType: - return apiservercel.IntType - case cel.NullType: - return apiservercel.NullType - case cel.StringType: - return apiservercel.StringType - case cel.TimestampType: - return apiservercel.TimestampType - case cel.UintType: - return apiservercel.UintType - default: - if celType.HasTrait(traits.ContainerType) && celType.HasTrait(traits.IndexerType) { - parameters := celType.Parameters() - switch len(parameters) { - case 1: - elemType := convertCelTypeToDeclType(parameters[0]) - return apiservercel.NewListType(elemType, -1) - case 2: - keyType := convertCelTypeToDeclType(parameters[0]) - valueType := convertCelTypeToDeclType(parameters[1]) - return apiservercel.NewMapType(keyType, valueType, -1) - } - } - return apiservercel.DynType - } -} diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go index 9cd3c01aed3..b2624694c84 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/controller_reconcile.go @@ -180,9 +180,8 @@ func (c *policyController) reconcilePolicyDefinitionSpec(namespace, name string, celmetrics.Metrics.ObserveDefinition(context.TODO(), "active", "deny") } - // Skip reconcile if the spec of the definition is unchanged and had a - // successful previous sync - if info.configurationError == nil && info.lastReconciledValue != nil && definition != nil && + // Skip reconcile if the spec of the definition is unchanged + if info.lastReconciledValue != nil && definition != nil && apiequality.Semantic.DeepEqual(info.lastReconciledValue.Spec, definition.Spec) { return nil } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go index 86c8479c3a4..6d73e237b07 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/validatingadmissionpolicy/typechecking.go @@ -238,7 +238,7 @@ func (c *TypeChecker) typesToCheck(p *v1beta1.ValidatingAdmissionPolicy) []schem if p.Spec.MatchConstraints == nil || len(p.Spec.MatchConstraints.ResourceRules) == 0 { return nil } - restMapperRefreshAttempted := false // at most once per policy, refresh RESTMapper and retry resolution. 
+ for _, rule := range p.Spec.MatchConstraints.ResourceRules { groups := extractGroups(&rule.Rule) if len(groups) == 0 { @@ -268,16 +268,7 @@ func (c *TypeChecker) typesToCheck(p *v1beta1.ValidatingAdmissionPolicy) []schem } resolved, err := c.RestMapper.KindsFor(gvr) if err != nil { - if restMapperRefreshAttempted { - // RESTMapper refresh happens at most once per policy - continue - } - c.tryRefreshRESTMapper() - restMapperRefreshAttempted = true - resolved, err = c.RestMapper.KindsFor(gvr) - if err != nil { - continue - } + continue } for _, r := range resolved { if !r.Empty() { @@ -353,13 +344,6 @@ func sortGVKList(list []schema.GroupVersionKind) []schema.GroupVersionKind { return list } -// tryRefreshRESTMapper refreshes the RESTMapper if it supports refreshing. -func (c *TypeChecker) tryRefreshRESTMapper() { - if r, ok := c.RestMapper.(meta.ResettableRESTMapper); ok { - r.Reset() - } -} - func buildEnv(hasParams bool, hasAuthorizer bool, types typeOverwrite) (*cel.Env, error) { baseEnv := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion()) requestType := plugincel.BuildRequestType() diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go index 7b845f1d19a..78f5312a475 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config/kubeconfig.go @@ -19,6 +19,7 @@ package config import ( "fmt" "io" + "io/ioutil" "path" "k8s.io/apimachinery/pkg/runtime" @@ -46,7 +47,7 @@ func LoadConfig(configFile io.Reader) (string, error) { var kubeconfigFile string if configFile != nil { // we have a config so parse it. - data, err := io.ReadAll(configFile) + data, err := ioutil.ReadAll(configFile) if err != nil { return "", err } diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugins.go b/vendor/k8s.io/apiserver/pkg/admission/plugins.go index 10a435d4926..1afb480dd7f 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugins.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugins.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "reflect" "sort" "strings" @@ -114,7 +115,7 @@ func splitStream(config io.Reader) (io.Reader, io.Reader, error) { return nil, nil, nil } - configBytes, err := io.ReadAll(config) + configBytes, err := ioutil.ReadAll(config) if err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go index d42852d93e6..14ba08482ae 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go @@ -43,8 +43,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { ) scheme.AddKnownTypes(SchemeGroupVersion, &AdmissionConfiguration{}, - &AuthenticationConfiguration{}, - &AuthorizationConfiguration{}, &EgressSelectorConfiguration{}, &TracingConfiguration{}, ) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go index f3b4ae321ef..dcb082e0953 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -157,188 +157,3 @@ type TracingConfiguration struct { // Embed the component config tracing configuration struct tracingapi.TracingConfiguration } - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuthenticationConfiguration provides versioned 
configuration for authentication. -type AuthenticationConfiguration struct { - metav1.TypeMeta - - JWT []JWTAuthenticator -} - -// JWTAuthenticator provides the configuration for a single JWT authenticator. -type JWTAuthenticator struct { - Issuer Issuer - ClaimValidationRules []ClaimValidationRule - ClaimMappings ClaimMappings - UserValidationRules []UserValidationRule -} - -// Issuer provides the configuration for a external provider specific settings. -type Issuer struct { - URL string - CertificateAuthority string - Audiences []string -} - -// ClaimValidationRule provides the configuration for a single claim validation rule. -type ClaimValidationRule struct { - Claim string - RequiredValue string - - Expression string - Message string -} - -// ClaimMappings provides the configuration for claim mapping -type ClaimMappings struct { - Username PrefixedClaimOrExpression - Groups PrefixedClaimOrExpression - UID ClaimOrExpression - Extra []ExtraMapping -} - -// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. -type PrefixedClaimOrExpression struct { - Claim string - Prefix *string - - Expression string -} - -// ClaimOrExpression provides the configuration for a single claim or expression. -type ClaimOrExpression struct { - Claim string - Expression string -} - -// ExtraMapping provides the configuration for a single extra mapping. -type ExtraMapping struct { - Key string - ValueExpression string -} - -// UserValidationRule provides the configuration for a single user validation rule. -type UserValidationRule struct { - Expression string - Message string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type AuthorizationConfiguration struct { - metav1.TypeMeta - - // Authorizers is an ordered list of authorizers to - // authorize requests against. - // This is similar to the --authorization-modes kube-apiserver flag - // Must be at least one. - Authorizers []AuthorizerConfiguration `json:"authorizers"` -} - -const ( - TypeWebhook AuthorizerType = "Webhook" - FailurePolicyNoOpinion string = "NoOpinion" - FailurePolicyDeny string = "Deny" - AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" - AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" -) - -type AuthorizerType string - -type AuthorizerConfiguration struct { - // Type refers to the type of the authorizer - // "Webhook" is supported in the generic API server - // Other API servers may support additional authorizer - // types like Node, RBAC, ABAC, etc. - Type AuthorizerType - - // Name used to describe the webhook - // This is explicitly used in monitoring machinery for metrics - // Note: Names must be DNS1123 labels like `myauthorizername` or - // subdomains like `myauthorizer.example.domain` - // Required, with no default - Name string - - // Webhook defines the configuration for a Webhook authorizer - // Must be defined when Type=Webhook - Webhook *WebhookConfiguration -} - -type WebhookConfiguration struct { - // The duration to cache 'authorized' responses from the webhook - // authorizer. - // Same as setting `--authorization-webhook-cache-authorized-ttl` flag - // Default: 5m0s - AuthorizedTTL metav1.Duration - // The duration to cache 'unauthorized' responses from the webhook - // authorizer. - // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag - // Default: 30s - UnauthorizedTTL metav1.Duration - // Timeout for the webhook request - // Maximum allowed value is 30s. 
- // Required, no default value. - Timeout metav1.Duration - // The API version of the authorization.k8s.io SubjectAccessReview to - // send to and expect from the webhook. - // Same as setting `--authorization-webhook-version` flag - // Valid values: v1beta1, v1 - // Required, no default value - SubjectAccessReviewVersion string - // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview - // version the CEL expressions are evaluated against - // Valid values: v1 - // Required, no default value - MatchConditionSubjectAccessReviewVersion string - // Controls the authorization decision when a webhook request fails to - // complete or returns a malformed response or errors evaluating - // matchConditions. - // Valid values: - // - NoOpinion: continue to subsequent authorizers to see if one of - // them allows the request - // - Deny: reject the request without consulting subsequent authorizers - // Required, with no default. - FailurePolicy string - - // ConnectionInfo defines how we talk to the webhook - ConnectionInfo WebhookConnectionInfo - - // matchConditions is a list of conditions that must be met for a request to be sent to this - // webhook. An empty list of matchConditions matches all requests. - // There are a maximum of 64 match conditions allowed. - // - // The exact matching logic is (in order): - // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. - // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. - // 3. If at least one matchCondition evaluates to an error (but none are FALSE): - // - If failurePolicy=Deny, then the webhook rejects the request - // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped - MatchConditions []WebhookMatchCondition -} - -type WebhookConnectionInfo struct { - // Controls how the webhook should communicate with the server. - // Valid values: - // - KubeConfigFile: use the file specified in kubeConfigFile to locate the - // server. - // - InClusterConfig: use the in-cluster configuration to call the - // SubjectAccessReview API hosted by kube-apiserver. This mode is not - // allowed for kube-apiserver. - Type string - - // Path to KubeConfigFile for connection info - // Required, if connectionInfo.Type is KubeConfig - KubeConfigFile *string -} - -type WebhookMatchCondition struct { - // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. - // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, - // the contents would be converted to the v1 version before evaluating the CEL expression. - // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - Expression string -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go deleted file mode 100644 index a9af01fe76c..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "time" - - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { - if obj.AuthorizedTTL.Duration == 0 { - obj.AuthorizedTTL.Duration = 5 * time.Minute - } - if obj.UnauthorizedTTL.Duration == 0 { - obj.UnauthorizedTTL.Duration = 30 * time.Second - } -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go index 7d68ac0c62e..e4e16c01ce4 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -43,7 +43,7 @@ func init() { // We only register manually written functions here. The registration of the // generated functions takes place in the generated files. The separation // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) + localSchemeBuilder.Register(addKnownTypes) } // Adds the list of known types to the given scheme. @@ -53,8 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &EgressSelectorConfiguration{}, ) scheme.AddKnownTypes(ConfigSchemeGroupVersion, - &AuthenticationConfiguration{}, - &AuthorizationConfiguration{}, &TracingConfiguration{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go index 9394ba6f70a..b18d84d9d3c 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -158,379 +158,3 @@ type TracingConfiguration struct { // Embed the component config tracing configuration struct tracingapi.TracingConfiguration `json:",inline"` } - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuthenticationConfiguration provides versioned configuration for authentication. -type AuthenticationConfiguration struct { - metav1.TypeMeta - - // jwt is a list of authenticator to authenticate Kubernetes users using - // JWT compliant tokens. The authenticator will attempt to parse a raw ID token, - // verify it's been signed by the configured issuer. The public key to verify the - // signature is discovered from the issuer's public endpoint using OIDC discovery. - // For an incoming token, each JWT authenticator will be attempted in - // the order in which it is specified in this list. Note however that - // other authenticators may run before or after the JWT authenticators. - // The specific position of JWT authenticators in relation to other - // authenticators is neither defined nor stable across releases. Since - // each JWT authenticator must have a unique issuer URL, at most one - // JWT authenticator will attempt to cryptographically validate the token. - JWT []JWTAuthenticator `json:"jwt"` -} - -// JWTAuthenticator provides the configuration for a single JWT authenticator. 
-type JWTAuthenticator struct { - // issuer contains the basic OIDC provider connection options. - // +required - Issuer Issuer `json:"issuer"` - - // claimValidationRules are rules that are applied to validate token claims to authenticate users. - // +optional - ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"` - - // claimMappings points claims of a token to be treated as user attributes. - // +required - ClaimMappings ClaimMappings `json:"claimMappings"` - - // userValidationRules are rules that are applied to final user before completing authentication. - // These allow invariants to be applied to incoming identities such as preventing the - // use of the system: prefix that is commonly used by Kubernetes components. - // The validation rules are logically ANDed together and must all return true for the validation to pass. - // +optional - UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"` -} - -// Issuer provides the configuration for a external provider specific settings. -type Issuer struct { - // url points to the issuer URL in a format https://url or https://url/path. - // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. - // Same value as the --oidc-issuer-url flag. - // Used to fetch discovery information unless overridden by discoveryURL. - // Required to be unique. - // Note that egress selection configuration is not used for this network connection. - // +required - URL string `json:"url"` - - // certificateAuthority contains PEM-encoded certificate authority certificates - // used to validate the connection when fetching discovery information. - // If unset, the system verifier is used. - // Same value as the content of the file referenced by the --oidc-ca-file flag. - // +optional - CertificateAuthority string `json:"certificateAuthority,omitempty"` - - // audiences is the set of acceptable audiences the JWT must be issued to. - // At least one of the entries must match the "aud" claim in presented JWTs. - // Same value as the --oidc-client-id flag (though this field supports an array). - // Required to be non-empty. - // +required - Audiences []string `json:"audiences"` -} - -// ClaimValidationRule provides the configuration for a single claim validation rule. -type ClaimValidationRule struct { - // claim is the name of a required claim. - // Same as --oidc-required-claim flag. - // Only string claim keys are supported. - // Mutually exclusive with expression and message. - // +optional - Claim string `json:"claim,omitempty"` - // requiredValue is the value of a required claim. - // Same as --oidc-required-claim flag. - // Only string claim values are supported. - // If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. - // Mutually exclusive with expression and message. - // +optional - RequiredValue string `json:"requiredValue,omitempty"` - - // expression represents the expression which will be evaluated by CEL. - // Must produce a boolean. - // - // CEL expressions have access to the contents of the token claims, organized into CEL variable: - // - 'claims' is a map of claim names to claim values. - // For example, a variable named 'sub' can be accessed as 'claims.sub'. - // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. - // Must return true for the validation to pass. 
- // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - // - // Mutually exclusive with claim and requiredValue. - // +optional - Expression string `json:"expression,omitempty"` - // message customizes the returned error message when expression returns false. - // message is a literal string. - // Mutually exclusive with claim and requiredValue. - // +optional - Message string `json:"message,omitempty"` -} - -// ClaimMappings provides the configuration for claim mapping -type ClaimMappings struct { - // username represents an option for the username attribute. - // The claim's value must be a singular string. - // Same as the --oidc-username-claim and --oidc-username-prefix flags. - // If username.expression is set, the expression must produce a string value. - // - // In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, - // the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. - // For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. - // For prefix: - // (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, - // set username.prefix="" - // (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "#". For the same - // behavior using authentication config, set username.prefix="#" - // (3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix="" - // +required - Username PrefixedClaimOrExpression `json:"username"` - // groups represents an option for the groups attribute. - // The claim's value must be a string or string array claim. - // If groups.claim is set, the prefix must be specified (and can be the empty string). - // If groups.expression is set, the expression must produce a string or string array value. - // "", [], and null values are treated as the group mapping not being present. - // +optional - Groups PrefixedClaimOrExpression `json:"groups,omitempty"` - - // uid represents an option for the uid attribute. - // Claim must be a singular string claim. - // If uid.expression is set, the expression must produce a string value. - // +optional - UID ClaimOrExpression `json:"uid"` - - // extra represents an option for the extra attribute. - // expression must produce a string or string array value. - // If the value is empty, the extra mapping will not be present. - // - // hard-coded extra key/value - // - key: "foo" - // valueExpression: "'bar'" - // This will result in an extra attribute - foo: ["bar"] - // - // hard-coded key, value copying claim value - // - key: "foo" - // valueExpression: "claims.some_claim" - // This will result in an extra attribute - foo: [value of some_claim] - // - // hard-coded key, value derived from claim value - // - key: "admin" - // valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""' - // This will result in: - // - if is_admin claim is present and true, extra attribute - admin: ["true"] - // - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added - // - // +optional - Extra []ExtraMapping `json:"extra,omitempty"` -} - -// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. 
-type PrefixedClaimOrExpression struct { - // claim is the JWT claim to use. - // Mutually exclusive with expression. - // +optional - Claim string `json:"claim,omitempty"` - // prefix is prepended to claim's value to prevent clashes with existing names. - // prefix needs to be set if claim is set and can be the empty string. - // Mutually exclusive with expression. - // +optional - Prefix *string `json:"prefix,omitempty"` - - // expression represents the expression which will be evaluated by CEL. - // - // CEL expressions have access to the contents of the token claims, organized into CEL variable: - // - 'claims' is a map of claim names to claim values. - // For example, a variable named 'sub' can be accessed as 'claims.sub'. - // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. - // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - // - // Mutually exclusive with claim and prefix. - // +optional - Expression string `json:"expression,omitempty"` -} - -// ClaimOrExpression provides the configuration for a single claim or expression. -type ClaimOrExpression struct { - // claim is the JWT claim to use. - // Either claim or expression must be set. - // Mutually exclusive with expression. - // +optional - Claim string `json:"claim,omitempty"` - - // expression represents the expression which will be evaluated by CEL. - // - // CEL expressions have access to the contents of the token claims, organized into CEL variable: - // - 'claims' is a map of claim names to claim values. - // For example, a variable named 'sub' can be accessed as 'claims.sub'. - // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. - // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - // - // Mutually exclusive with claim. - // +optional - Expression string `json:"expression,omitempty"` -} - -// ExtraMapping provides the configuration for a single extra mapping. -type ExtraMapping struct { - // key is a string to use as the extra attribute key. - // key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid - // subdomain as defined by RFC 1123. All characters trailing the first "/" must - // be valid HTTP Path characters as defined by RFC 3986. - // key must be lowercase. - // +required - Key string `json:"key"` - - // valueExpression is a CEL expression to extract extra attribute value. - // valueExpression must produce a string or string array value. - // "", [], and null values are treated as the extra mapping not being present. - // Empty string values contained within a string array are filtered out. - // - // CEL expressions have access to the contents of the token claims, organized into CEL variable: - // - 'claims' is a map of claim names to claim values. - // For example, a variable named 'sub' can be accessed as 'claims.sub'. - // Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. - // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - // - // +required - ValueExpression string `json:"valueExpression"` -} - -// UserValidationRule provides the configuration for a single user info validation rule. -type UserValidationRule struct { - // expression represents the expression which will be evaluated by CEL. - // Must return true for the validation to pass. 
- // - // CEL expressions have access to the contents of UserInfo, organized into CEL variable: - // - 'user' - authentication.k8s.io/v1, Kind=UserInfo object - // Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. - // API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io - // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - // - // +required - Expression string `json:"expression"` - - // message customizes the returned error message when rule returns false. - // message is a literal string. - // +optional - Message string `json:"message,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type AuthorizationConfiguration struct { - metav1.TypeMeta - - // Authorizers is an ordered list of authorizers to - // authorize requests against. - // This is similar to the --authorization-modes kube-apiserver flag - // Must be at least one. - Authorizers []AuthorizerConfiguration `json:"authorizers"` -} - -const ( - TypeWebhook AuthorizerType = "Webhook" - FailurePolicyNoOpinion string = "NoOpinion" - FailurePolicyDeny string = "Deny" - AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" - AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" -) - -type AuthorizerType string - -type AuthorizerConfiguration struct { - // Type refers to the type of the authorizer - // "Webhook" is supported in the generic API server - // Other API servers may support additional authorizer - // types like Node, RBAC, ABAC, etc. - Type string `json:"type"` - - // Name used to describe the webhook - // This is explicitly used in monitoring machinery for metrics - // Note: Names must be DNS1123 labels like `myauthorizername` or - // subdomains like `myauthorizer.example.domain` - // Required, with no default - Name string `json:"name"` - - // Webhook defines the configuration for a Webhook authorizer - // Must be defined when Type=Webhook - // Must not be defined when Type!=Webhook - Webhook *WebhookConfiguration `json:"webhook,omitempty"` -} - -type WebhookConfiguration struct { - // The duration to cache 'authorized' responses from the webhook - // authorizer. - // Same as setting `--authorization-webhook-cache-authorized-ttl` flag - // Default: 5m0s - AuthorizedTTL metav1.Duration `json:"authorizedTTL"` - // The duration to cache 'unauthorized' responses from the webhook - // authorizer. - // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag - // Default: 30s - UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` - // Timeout for the webhook request - // Maximum allowed value is 30s. - // Required, no default value. - Timeout metav1.Duration `json:"timeout"` - // The API version of the authorization.k8s.io SubjectAccessReview to - // send to and expect from the webhook. 
- // Same as setting `--authorization-webhook-version` flag - // Valid values: v1beta1, v1 - // Required, no default value - SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` - // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview - // version the CEL expressions are evaluated against - // Valid values: v1 - // Required, no default value - MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` - // Controls the authorization decision when a webhook request fails to - // complete or returns a malformed response or errors evaluating - // matchConditions. - // Valid values: - // - NoOpinion: continue to subsequent authorizers to see if one of - // them allows the request - // - Deny: reject the request without consulting subsequent authorizers - // Required, with no default. - FailurePolicy string `json:"failurePolicy"` - - // ConnectionInfo defines how we talk to the webhook - ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` - - // matchConditions is a list of conditions that must be met for a request to be sent to this - // webhook. An empty list of matchConditions matches all requests. - // There are a maximum of 64 match conditions allowed. - // - // The exact matching logic is (in order): - // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. - // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. - // 3. If at least one matchCondition evaluates to an error (but none are FALSE): - // - If failurePolicy=Deny, then the webhook rejects the request - // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped - MatchConditions []WebhookMatchCondition `json:"matchConditions"` -} - -type WebhookConnectionInfo struct { - // Controls how the webhook should communicate with the server. - // Valid values: - // - KubeConfigFile: use the file specified in kubeConfigFile to locate the - // server. - // - InClusterConfig: use the in-cluster configuration to call the - // SubjectAccessReview API hosted by kube-apiserver. This mode is not - // allowed for kube-apiserver. - Type string `json:"type"` - - // Path to KubeConfigFile for connection info - // Required, if connectionInfo.Type is KubeConfig - KubeConfigFile *string `json:"kubeConfigFile"` -} - -type WebhookMatchCondition struct { - // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. - // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, - // the contents would be converted to the v1 version before evaluating the CEL expression. 
- // - // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - Expression string `json:"expression"` -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go index 92060206840..41b350c48e2 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -56,66 +56,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope) - }); err != nil { - return err - } - if 
err := s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope) }); err != nil { @@ -141,46 +81,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_Issuer_To_v1alpha1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope) - }); err != nil { - return err - } - if 
err := s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope) }); err != nil { @@ -231,46 +131,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), 
b.(*apiserver.WebhookMatchCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) - }); err != nil { - return err - } if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) }); err != nil { @@ -323,156 +183,6 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s) } -func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { - out.JWT = *(*[]apiserver.JWTAuthenticator)(unsafe.Pointer(&in.JWT)) - return nil -} - -// Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s) -} - -func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error { - out.JWT = *(*[]JWTAuthenticator)(unsafe.Pointer(&in.JWT)) - return nil -} - -// Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration is an autogenerated conversion function. -func Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error { - return autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { - out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) - return nil -} - -// Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s) -} - -func autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { - out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers)) - return nil -} - -// Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration is an autogenerated conversion function. -func Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error { - return autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { - out.Type = apiserver.AuthorizerType(in.Type) - out.Name = in.Name - out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook)) - return nil -} - -// Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function. -func Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s) -} - -func autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { - out.Type = string(in.Type) - out.Name = in.Name - out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook)) - return nil -} - -// Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration is an autogenerated conversion function. -func Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error { - return autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error { - if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil { - return err - } - if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil { - return err - } - if err := Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil { - return err - } - out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra)) - return nil -} - -// Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function. 
-func Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error { - return autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s) -} - -func autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error { - if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil { - return err - } - if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil { - return err - } - if err := Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil { - return err - } - out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra)) - return nil -} - -// Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings is an autogenerated conversion function. -func Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error { - return autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in, out, s) -} - -func autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error { - out.Claim = in.Claim - out.Expression = in.Expression - return nil -} - -// Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function. -func Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error { - return autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s) -} - -func autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error { - out.Claim = in.Claim - out.Expression = in.Expression - return nil -} - -// Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression is an autogenerated conversion function. -func Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error { - return autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in, out, s) -} - -func autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error { - out.Claim = in.Claim - out.RequiredValue = in.RequiredValue - out.Expression = in.Expression - out.Message = in.Message - return nil -} - -// Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function. -func Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error { - return autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s) -} - -func autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error { - out.Claim = in.Claim - out.RequiredValue = in.RequiredValue - out.Expression = in.Expression - out.Message = in.Message - return nil -} - -// Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule is an autogenerated conversion function. 
-func Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error { - return autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in, out, s) -} - func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error { out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol) out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport)) @@ -556,110 +266,6 @@ func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorCon return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s) } -func autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error { - out.Key = in.Key - out.ValueExpression = in.ValueExpression - return nil -} - -// Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function. -func Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error { - return autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s) -} - -func autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error { - out.Key = in.Key - out.ValueExpression = in.ValueExpression - return nil -} - -// Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping is an autogenerated conversion function. -func Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error { - return autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in, out, s) -} - -func autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error { - out.URL = in.URL - out.CertificateAuthority = in.CertificateAuthority - out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) - return nil -} - -// Convert_v1alpha1_Issuer_To_apiserver_Issuer is an autogenerated conversion function. -func Convert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error { - return autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in, out, s) -} - -func autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error { - out.URL = in.URL - out.CertificateAuthority = in.CertificateAuthority - out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences)) - return nil -} - -// Convert_apiserver_Issuer_To_v1alpha1_Issuer is an autogenerated conversion function. 
-func Convert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error { - return autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in, out, s) -} - -func autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error { - if err := Convert_v1alpha1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil { - return err - } - out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules)) - if err := Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil { - return err - } - out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules)) - return nil -} - -// Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function. -func Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error { - return autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s) -} - -func autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error { - if err := Convert_apiserver_Issuer_To_v1alpha1_Issuer(&in.Issuer, &out.Issuer, s); err != nil { - return err - } - out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules)) - if err := Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil { - return err - } - out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules)) - return nil -} - -// Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator is an autogenerated conversion function. -func Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error { - return autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in, out, s) -} - -func autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error { - out.Claim = in.Claim - out.Prefix = (*string)(unsafe.Pointer(in.Prefix)) - out.Expression = in.Expression - return nil -} - -// Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function. -func Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error { - return autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s) -} - -func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error { - out.Claim = in.Claim - out.Prefix = (*string)(unsafe.Pointer(in.Prefix)) - out.Expression = in.Expression - return nil -} - -// Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression is an autogenerated conversion function. 
-func Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error { - return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in, out, s) -} - func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error { out.URL = in.URL out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig)) @@ -767,105 +373,3 @@ func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.U func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error { return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s) } - -func autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error { - out.Expression = in.Expression - out.Message = in.Message - return nil -} - -// Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function. -func Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error { - return autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s) -} - -func autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error { - out.Expression = in.Expression - out.Message = in.Message - return nil -} - -// Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule is an autogenerated conversion function. -func Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error { - return autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in, out, s) -} - -func autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { - out.AuthorizedTTL = in.AuthorizedTTL - out.UnauthorizedTTL = in.UnauthorizedTTL - out.Timeout = in.Timeout - out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion - out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion - out.FailurePolicy = in.FailurePolicy - if err := Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { - return err - } - out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) - return nil -} - -// Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function. 
-func Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s) -} - -func autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { - out.AuthorizedTTL = in.AuthorizedTTL - out.UnauthorizedTTL = in.UnauthorizedTTL - out.Timeout = in.Timeout - out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion - out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion - out.FailurePolicy = in.FailurePolicy - if err := Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil { - return err - } - out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions)) - return nil -} - -// Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration is an autogenerated conversion function. -func Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error { - return autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { - out.Type = in.Type - out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) - return nil -} - -// Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function. -func Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error { - return autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s) -} - -func autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { - out.Type = in.Type - out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile)) - return nil -} - -// Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo is an autogenerated conversion function. -func Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error { - return autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in, out, s) -} - -func autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { - out.Expression = in.Expression - return nil -} - -// Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function. 
-func Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error { - return autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s) -} - -func autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { - out.Expression = in.Expression - return nil -} - -// Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition is an autogenerated conversion function. -func Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error { - return autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in, out, s) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go index 932af612707..f8ac34035fd 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go @@ -78,147 +78,6 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.JWT != nil { - in, out := &in.JWT, &out.JWT - *out = make([]JWTAuthenticator, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. -func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { - if in == nil { - return nil - } - out := new(AuthenticationConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Authorizers != nil { - in, out := &in.Authorizers, &out.Authorizers - *out = make([]AuthorizerConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. -func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { - if in == nil { - return nil - } - out := new(AuthorizationConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { - *out = *in - if in.Webhook != nil { - in, out := &in.Webhook, &out.Webhook - *out = new(WebhookConfiguration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. -func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { - if in == nil { - return nil - } - out := new(AuthorizerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { - *out = *in - in.Username.DeepCopyInto(&out.Username) - in.Groups.DeepCopyInto(&out.Groups) - out.UID = in.UID - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make([]ExtraMapping, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings. -func (in *ClaimMappings) DeepCopy() *ClaimMappings { - if in == nil { - return nil - } - out := new(ClaimMappings) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression. -func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression { - if in == nil { - return nil - } - out := new(ClaimOrExpression) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule. -func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule { - if in == nil { - return nil - } - out := new(ClaimValidationRule) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Connection) DeepCopyInto(out *Connection) { *out = *in @@ -289,92 +148,6 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. -func (in *ExtraMapping) DeepCopy() *ExtraMapping { - if in == nil { - return nil - } - out := new(ExtraMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Issuer) DeepCopyInto(out *Issuer) { - *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer. 
-func (in *Issuer) DeepCopy() *Issuer { - if in == nil { - return nil - } - out := new(Issuer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) { - *out = *in - in.Issuer.DeepCopyInto(&out.Issuer) - if in.ClaimValidationRules != nil { - in, out := &in.ClaimValidationRules, &out.ClaimValidationRules - *out = make([]ClaimValidationRule, len(*in)) - copy(*out, *in) - } - in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) - if in.UserValidationRules != nil { - in, out := &in.UserValidationRules, &out.UserValidationRules - *out = make([]UserValidationRule, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator. -func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator { - if in == nil { - return nil - } - out := new(JWTAuthenticator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) { - *out = *in - if in.Prefix != nil { - in, out := &in.Prefix, &out.Prefix - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression. -func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression { - if in == nil { - return nil - } - out := new(PrefixedClaimOrExpression) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { *out = *in @@ -479,81 +252,3 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule. -func (in *UserValidationRule) DeepCopy() *UserValidationRule { - if in == nil { - return nil - } - out := new(UserValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { - *out = *in - out.AuthorizedTTL = in.AuthorizedTTL - out.UnauthorizedTTL = in.UnauthorizedTTL - out.Timeout = in.Timeout - in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) - if in.MatchConditions != nil { - in, out := &in.MatchConditions, &out.MatchConditions - *out = make([]WebhookMatchCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. -func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { - if in == nil { - return nil - } - out := new(WebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { - *out = *in - if in.KubeConfigFile != nil { - in, out := &in.KubeConfigFile, &out.KubeConfigFile - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. -func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { - if in == nil { - return nil - } - out := new(WebhookConnectionInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. -func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { - if in == nil { - return nil - } - out := new(WebhookMatchCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go index fc76be0fb8a..5070cb91b90 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -29,15 +29,5 @@ import ( // Public to allow building arbitrary schemes. // All generated defaulters are covering - they call all nested defaulters. func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) return nil } - -func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { - for i := range in.Authorizers { - a := &in.Authorizers[i] - if a.Webhook != nil { - SetDefaults_WebhookConfiguration(a.Webhook) - } - } -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go deleted file mode 100644 index 843324085cf..00000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go +++ /dev/null @@ -1,630 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - v1 "k8s.io/api/authorization/v1" - "k8s.io/api/authorization/v1beta1" - "k8s.io/apimachinery/pkg/util/sets" - utilvalidation "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - api "k8s.io/apiserver/pkg/apis/apiserver" - authenticationcel "k8s.io/apiserver/pkg/authentication/cel" - authorizationcel "k8s.io/apiserver/pkg/authorization/cel" - "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/util/cert" -) - -const ( - atLeastOneRequiredErrFmt = "at least one %s is required" -) - -var ( - root = field.NewPath("jwt") -) - -// ValidateAuthenticationConfiguration validates a given AuthenticationConfiguration. -func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration) field.ErrorList { - var allErrs field.ErrorList - - // This stricter validation is solely based on what the current implementation supports. - // TODO(aramase): when StructuredAuthenticationConfiguration feature gate is added and wired up, - // relax this check to allow 0 authenticators. This will allow us to support the case where - // API server is initially configured with no authenticators and then authenticators are added - // later via dynamic config. - if len(c.JWT) == 0 { - allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root))) - return allErrs - } - - // This stricter validation is because the --oidc-* flag option is singular. - // TODO(aramase): when StructuredAuthenticationConfiguration feature gate is added and wired up, - // remove the 1 authenticator limit check and add set the limit to 64. - if len(c.JWT) > 1 { - allErrs = append(allErrs, field.TooMany(root, len(c.JWT), 1)) - return allErrs - } - - // TODO(aramase): right now we only support a single JWT authenticator as - // this is wired to the --oidc-* flags. When StructuredAuthenticationConfiguration - // feature gate is added and wired up, we will remove the 1 authenticator limit - // check and add validation for duplicate issuers. - for i, a := range c.JWT { - fldPath := root.Index(i) - _, errs := validateJWTAuthenticator(a, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) - allErrs = append(allErrs, errs...) - } - - return allErrs -} - -// CompileAndValidateJWTAuthenticator validates a given JWTAuthenticator and returns a CELMapper with the compiled -// CEL expressions for claim mappings and validation rules. -// This is exported for use in oidc package. -func CompileAndValidateJWTAuthenticator(authenticator api.JWTAuthenticator) (authenticationcel.CELMapper, field.ErrorList) { - return validateJWTAuthenticator(authenticator, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration)) -} - -func validateJWTAuthenticator(authenticator api.JWTAuthenticator, fldPath *field.Path, structuredAuthnFeatureEnabled bool) (authenticationcel.CELMapper, field.ErrorList) { - var allErrs field.ErrorList - - compiler := authenticationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())) - mapper := &authenticationcel.CELMapper{} - - allErrs = append(allErrs, validateIssuer(authenticator.Issuer, fldPath.Child("issuer"))...) 
- allErrs = append(allErrs, validateClaimValidationRules(compiler, mapper, authenticator.ClaimValidationRules, fldPath.Child("claimValidationRules"), structuredAuthnFeatureEnabled)...) - allErrs = append(allErrs, validateClaimMappings(compiler, mapper, authenticator.ClaimMappings, fldPath.Child("claimMappings"), structuredAuthnFeatureEnabled)...) - allErrs = append(allErrs, validateUserValidationRules(compiler, mapper, authenticator.UserValidationRules, fldPath.Child("userValidationRules"), structuredAuthnFeatureEnabled)...) - - return *mapper, allErrs -} - -func validateIssuer(issuer api.Issuer, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allErrs = append(allErrs, validateURL(issuer.URL, fldPath.Child("url"))...) - allErrs = append(allErrs, validateAudiences(issuer.Audiences, fldPath.Child("audiences"))...) - allErrs = append(allErrs, validateCertificateAuthority(issuer.CertificateAuthority, fldPath.Child("certificateAuthority"))...) - - return allErrs -} - -func validateURL(issuerURL string, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - if len(issuerURL) == 0 { - allErrs = append(allErrs, field.Required(fldPath, "URL is required")) - return allErrs - } - - u, err := url.Parse(issuerURL) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, err.Error())) - return allErrs - } - if u.Scheme != "https" { - allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL scheme must be https")) - } - if u.User != nil { - allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a username or password")) - } - if len(u.RawQuery) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a query")) - } - if len(u.Fragment) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a fragment")) - } - - return allErrs -} - -func validateAudiences(audiences []string, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - if len(audiences) == 0 { - allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, fldPath))) - return allErrs - } - // This stricter validation is because the --oidc-client-id flag option is singular. - // This will be removed when we support multiple audiences with the StructuredAuthenticationConfiguration feature gate. 
- if len(audiences) > 1 { - allErrs = append(allErrs, field.TooMany(fldPath, len(audiences), 1)) - return allErrs - } - - for i, audience := range audiences { - fldPath := fldPath.Index(i) - if len(audience) == 0 { - allErrs = append(allErrs, field.Required(fldPath, "audience can't be empty")) - } - } - - return allErrs -} - -func validateCertificateAuthority(certificateAuthority string, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - if len(certificateAuthority) == 0 { - return allErrs - } - _, err := cert.NewPoolFromBytes([]byte(certificateAuthority)) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, "", err.Error())) - } - - return allErrs -} - -func validateClaimValidationRules(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, rules []api.ClaimValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { - var allErrs field.ErrorList - - seenClaims := sets.NewString() - seenExpressions := sets.NewString() - var compilationResults []authenticationcel.CompilationResult - - for i, rule := range rules { - fldPath := fldPath.Index(i) - - if len(rule.Expression) > 0 && !structuredAuthnFeatureEnabled { - allErrs = append(allErrs, field.Invalid(fldPath.Child("expression"), rule.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - - switch { - case len(rule.Claim) > 0 && len(rule.Expression) > 0: - allErrs = append(allErrs, field.Invalid(fldPath, rule.Claim, "claim and expression can't both be set")) - case len(rule.Claim) == 0 && len(rule.Expression) == 0: - allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) - case len(rule.Claim) > 0: - if len(rule.Message) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("message"), rule.Message, "message can't be set when claim is set")) - } - if seenClaims.Has(rule.Claim) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("claim"), rule.Claim)) - } - seenClaims.Insert(rule.Claim) - case len(rule.Expression) > 0: - if len(rule.RequiredValue) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("requiredValue"), rule.RequiredValue, "requiredValue can't be set when expression is set")) - } - if seenExpressions.Has(rule.Expression) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) - continue - } - seenExpressions.Insert(rule.Expression) - - compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimValidationCondition{ - Expression: rule.Expression, - }, fldPath.Child("expression")) - - if err != nil { - allErrs = append(allErrs, err) - continue - } - if compilationResult != nil { - compilationResults = append(compilationResults, *compilationResult) - } - } - } - - if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { - celMapper.ClaimValidationRules = authenticationcel.NewClaimsMapper(compilationResults) - } - - return allErrs -} - -func validateClaimMappings(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, m api.ClaimMappings, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { - var allErrs field.ErrorList - - if !structuredAuthnFeatureEnabled { - if len(m.Username.Expression) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("username").Child("expression"), m.Username.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - if 
len(m.Groups.Expression) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("groups").Child("expression"), m.Groups.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - if len(m.UID.Claim) > 0 || len(m.UID.Expression) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "uid claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - if len(m.Extra) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("extra"), "", "extra claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - } - - compilationResult, err := validatePrefixClaimOrExpression(compiler, m.Username, fldPath.Child("username"), true, structuredAuthnFeatureEnabled) - if err != nil { - allErrs = append(allErrs, err...) - } else if compilationResult != nil && structuredAuthnFeatureEnabled { - celMapper.Username = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) - } - - compilationResult, err = validatePrefixClaimOrExpression(compiler, m.Groups, fldPath.Child("groups"), false, structuredAuthnFeatureEnabled) - if err != nil { - allErrs = append(allErrs, err...) - } else if compilationResult != nil && structuredAuthnFeatureEnabled { - celMapper.Groups = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) - } - - switch { - case len(m.UID.Claim) > 0 && len(m.UID.Expression) > 0: - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "claim and expression can't both be set")) - case len(m.UID.Expression) > 0: - compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ - Expression: m.UID.Expression, - }, fldPath.Child("uid").Child("expression")) - - if err != nil { - allErrs = append(allErrs, err) - } else if structuredAuthnFeatureEnabled && compilationResult != nil { - celMapper.UID = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) - } - } - - var extraCompilationResults []authenticationcel.CompilationResult - seenExtraKeys := sets.NewString() - - for i, mapping := range m.Extra { - fldPath := fldPath.Child("extra").Index(i) - // Key should be namespaced to the authenticator or authenticator/authorizer pair making use of them. - // For instance: "example.org/foo" instead of "foo". - // xref: https://github.com/kubernetes/kubernetes/blob/3825e206cb162a7ad7431a5bdf6a065ae8422cf7/staging/src/k8s.io/apiserver/pkg/authentication/user/user.go#L31-L41 - // IsDomainPrefixedPath checks for non-empty key and that the key is prefixed with a domain name. - allErrs = append(allErrs, utilvalidation.IsDomainPrefixedPath(fldPath.Child("key"), mapping.Key)...) 
- if mapping.Key != strings.ToLower(mapping.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), mapping.Key, "key must be lowercase")) - } - if seenExtraKeys.Has(mapping.Key) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("key"), mapping.Key)) - continue - } - seenExtraKeys.Insert(mapping.Key) - - if len(mapping.ValueExpression) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("valueExpression"), "valueExpression is required")) - continue - } - - compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ExtraMappingExpression{ - Key: mapping.Key, - Expression: mapping.ValueExpression, - }, fldPath.Child("valueExpression")) - - if err != nil { - allErrs = append(allErrs, err) - continue - } - - if compilationResult != nil { - extraCompilationResults = append(extraCompilationResults, *compilationResult) - } - } - - if structuredAuthnFeatureEnabled && len(extraCompilationResults) > 0 { - celMapper.Extra = authenticationcel.NewClaimsMapper(extraCompilationResults) - } - - return allErrs -} - -func validatePrefixClaimOrExpression(compiler authenticationcel.Compiler, mapping api.PrefixedClaimOrExpression, fldPath *field.Path, claimOrExpressionRequired, structuredAuthnFeatureEnabled bool) (*authenticationcel.CompilationResult, field.ErrorList) { - var allErrs field.ErrorList - - var compilationResult *authenticationcel.CompilationResult - switch { - case len(mapping.Expression) > 0 && len(mapping.Claim) > 0: - allErrs = append(allErrs, field.Invalid(fldPath, "", "claim and expression can't both be set")) - case len(mapping.Expression) == 0 && len(mapping.Claim) == 0 && claimOrExpressionRequired: - allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) - case len(mapping.Expression) > 0: - var err *field.Error - - if mapping.Prefix != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("prefix"), *mapping.Prefix, "prefix can't be set when expression is set")) - } - compilationResult, err = compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ - Expression: mapping.Expression, - }, fldPath.Child("expression")) - - if err != nil { - allErrs = append(allErrs, err) - } - - case len(mapping.Claim) > 0: - if mapping.Prefix == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("prefix"), "prefix is required when claim is set. 
It can be set to an empty string to disable prefixing")) - } - } - - return compilationResult, allErrs -} - -func validateUserValidationRules(compiler authenticationcel.Compiler, celMapper *authenticationcel.CELMapper, rules []api.UserValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { - var allErrs field.ErrorList - var compilationResults []authenticationcel.CompilationResult - - if len(rules) > 0 && !structuredAuthnFeatureEnabled { - allErrs = append(allErrs, field.Invalid(fldPath, "", "user validation rules are not supported when StructuredAuthenticationConfiguration feature gate is disabled")) - } - - seenExpressions := sets.NewString() - for i, rule := range rules { - fldPath := fldPath.Index(i) - - if len(rule.Expression) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("expression"), "expression is required")) - continue - } - - if seenExpressions.Has(rule.Expression) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) - continue - } - seenExpressions.Insert(rule.Expression) - - compilationResult, err := compileUserCELExpression(compiler, &authenticationcel.UserValidationCondition{ - Expression: rule.Expression, - Message: rule.Message, - }, fldPath.Child("expression")) - - if err != nil { - allErrs = append(allErrs, err) - continue - } - - if compilationResult != nil { - compilationResults = append(compilationResults, *compilationResult) - } - } - - if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { - celMapper.UserValidationRules = authenticationcel.NewUserMapper(compilationResults) - } - - return allErrs -} - -func compileClaimsCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { - compilationResult, err := compiler.CompileClaimsExpression(expression) - if err != nil { - return nil, convertCELErrorToValidationError(fldPath, expression, err) - } - return &compilationResult, nil -} - -func compileUserCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { - compilationResult, err := compiler.CompileUserExpression(expression) - if err != nil { - return nil, convertCELErrorToValidationError(fldPath, expression, err) - } - return &compilationResult, nil -} - -// ValidateAuthorizationConfiguration validates a given AuthorizationConfiguration. 
-func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.String, repeatableTypes sets.String) field.ErrorList { - allErrs := field.ErrorList{} - - if len(c.Authorizers) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("authorizers"), "at least one authorization mode must be defined")) - } - - seenAuthorizerTypes := sets.NewString() - seenAuthorizerNames := sets.NewString() - for i, a := range c.Authorizers { - fldPath := fldPath.Child("authorizers").Index(i) - aType := string(a.Type) - if aType == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("type"), "")) - continue - } - if !knownTypes.Has(aType) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, knownTypes.List())) - continue - } - if seenAuthorizerTypes.Has(aType) && !repeatableTypes.Has(aType) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("type"), aType)) - continue - } - seenAuthorizerTypes.Insert(aType) - - if len(a.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if seenAuthorizerNames.Has(a.Name) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), a.Name)) - } else if errs := utilvalidation.IsDNS1123Subdomain(a.Name); len(errs) != 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), a.Name, fmt.Sprintf("authorizer name is invalid: %s", strings.Join(errs, ", ")))) - } - seenAuthorizerNames.Insert(a.Name) - - switch a.Type { - case api.TypeWebhook: - if a.Webhook == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("webhook"), "required when type=Webhook")) - continue - } - allErrs = append(allErrs, ValidateWebhookConfiguration(fldPath, a.Webhook)...) - default: - if a.Webhook != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("webhook"), "non-null", "may only be specified when type=Webhook")) - } - } - } - - return allErrs -} - -func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { - allErrs := field.ErrorList{} - - if c.Timeout.Duration == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("timeout"), "")) - } else if c.Timeout.Duration > 30*time.Second || c.Timeout.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("timeout"), c.Timeout.Duration.String(), "must be > 0s and <= 30s")) - } - - if c.AuthorizedTTL.Duration == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("authorizedTTL"), "")) - } else if c.AuthorizedTTL.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("authorizedTTL"), c.AuthorizedTTL.Duration.String(), "must be > 0s")) - } - - if c.UnauthorizedTTL.Duration == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("unauthorizedTTL"), "")) - } else if c.UnauthorizedTTL.Duration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("unauthorizedTTL"), c.UnauthorizedTTL.Duration.String(), "must be > 0s")) - } - - switch c.SubjectAccessReviewVersion { - case "": - allErrs = append(allErrs, field.Required(fldPath.Child("subjectAccessReviewVersion"), "")) - case "v1": - _ = &v1.SubjectAccessReview{} - case "v1beta1": - _ = &v1beta1.SubjectAccessReview{} - default: - allErrs = append(allErrs, field.NotSupported(fldPath.Child("subjectAccessReviewVersion"), c.SubjectAccessReviewVersion, []string{"v1", "v1beta1"})) - } - - switch c.MatchConditionSubjectAccessReviewVersion { - case "": - if len(c.MatchConditions) > 0 { - allErrs = append(allErrs, 
field.Required(fldPath.Child("matchConditionSubjectAccessReviewVersion"), "required if match conditions are specified")) - } - case "v1": - _ = &v1.SubjectAccessReview{} - default: - allErrs = append(allErrs, field.NotSupported(fldPath.Child("matchConditionSubjectAccessReviewVersion"), c.MatchConditionSubjectAccessReviewVersion, []string{"v1"})) - } - - switch c.FailurePolicy { - case "": - allErrs = append(allErrs, field.Required(fldPath.Child("failurePolicy"), "")) - case api.FailurePolicyNoOpinion, api.FailurePolicyDeny: - default: - allErrs = append(allErrs, field.NotSupported(fldPath.Child("failurePolicy"), c.FailurePolicy, []string{"NoOpinion", "Deny"})) - } - - switch c.ConnectionInfo.Type { - case "": - allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "type"), "")) - case api.AuthorizationWebhookConnectionInfoTypeInCluster: - if c.ConnectionInfo.KubeConfigFile != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "can only be set when type=KubeConfigFile")) - } - case api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile: - if c.ConnectionInfo.KubeConfigFile == nil || *c.ConnectionInfo.KubeConfigFile == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "kubeConfigFile"), "")) - } else if !filepath.IsAbs(*c.ConnectionInfo.KubeConfigFile) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be an absolute path")) - } else if info, err := os.Stat(*c.ConnectionInfo.KubeConfigFile); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, fmt.Sprintf("error loading file: %v", err))) - } else if !info.Mode().IsRegular() { - allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be a regular file")) - } - default: - allErrs = append(allErrs, field.NotSupported(fldPath.Child("connectionInfo", "type"), c.ConnectionInfo, []string{api.AuthorizationWebhookConnectionInfoTypeInCluster, api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile})) - } - - _, errs := compileMatchConditions(c.MatchConditions, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) - allErrs = append(allErrs, errs...) - - return allErrs -} - -// ValidateAndCompileMatchConditions validates a given webhook's matchConditions. -// This is exported for use in authz package. 
-func ValidateAndCompileMatchConditions(matchConditions []api.WebhookMatchCondition) (*authorizationcel.CELMatcher, field.ErrorList) { - return compileMatchConditions(matchConditions, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) -} - -func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath *field.Path, structuredAuthzFeatureEnabled bool) (*authorizationcel.CELMatcher, field.ErrorList) { - var allErrs field.ErrorList - // should fail when match conditions are used without feature enabled - if len(matchConditions) > 0 && !structuredAuthzFeatureEnabled { - allErrs = append(allErrs, field.Invalid(fldPath.Child("matchConditions"), "", "matchConditions are not supported when StructuredAuthorizationConfiguration feature gate is disabled")) - } - if len(matchConditions) > 64 { - allErrs = append(allErrs, field.TooMany(fldPath.Child("matchConditions"), len(matchConditions), 64)) - return nil, allErrs - } - - compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion())) - seenExpressions := sets.NewString() - var compilationResults []authorizationcel.CompilationResult - - for i, condition := range matchConditions { - fldPath := fldPath.Child("matchConditions").Index(i).Child("expression") - if len(strings.TrimSpace(condition.Expression)) == 0 { - allErrs = append(allErrs, field.Required(fldPath, "")) - continue - } - if seenExpressions.Has(condition.Expression) { - allErrs = append(allErrs, field.Duplicate(fldPath, condition.Expression)) - continue - } - seenExpressions.Insert(condition.Expression) - compilationResult, err := compileMatchConditionsExpression(fldPath, compiler, condition.Expression) - if err != nil { - allErrs = append(allErrs, err) - continue - } - compilationResults = append(compilationResults, compilationResult) - } - if len(compilationResults) == 0 { - return nil, allErrs - } - return &authorizationcel.CELMatcher{ - CompilationResults: compilationResults, - }, allErrs -} - -func compileMatchConditionsExpression(fldPath *field.Path, compiler authorizationcel.Compiler, expression string) (authorizationcel.CompilationResult, *field.Error) { - authzExpression := &authorizationcel.SubjectAccessReviewMatchCondition{ - Expression: expression, - } - compilationResult, err := compiler.CompileCELExpression(authzExpression) - if err != nil { - return compilationResult, convertCELErrorToValidationError(fldPath, authzExpression, err) - } - return compilationResult, nil -} - -func convertCELErrorToValidationError(fldPath *field.Path, expression authorizationcel.ExpressionAccessor, err error) *field.Error { - var celErr *cel.Error - if errors.As(err, &celErr) { - switch celErr.Type { - case cel.ErrorTypeRequired: - return field.Required(fldPath, celErr.Detail) - case cel.ErrorTypeInvalid: - return field.Invalid(fldPath, expression.GetExpression(), celErr.Detail) - default: - return field.InternalError(fldPath, celErr) - } - } - return field.InternalError(fldPath, fmt.Errorf("error is not cel error: %w", err)) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go index 77e5c314219..40c8b4a6e9b 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -78,147 +78,6 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration return out } -// DeepCopyInto is 
an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.JWT != nil { - in, out := &in.JWT, &out.JWT - *out = make([]JWTAuthenticator, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. -func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { - if in == nil { - return nil - } - out := new(AuthenticationConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Authorizers != nil { - in, out := &in.Authorizers, &out.Authorizers - *out = make([]AuthorizerConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. -func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { - if in == nil { - return nil - } - out := new(AuthorizationConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { - *out = *in - if in.Webhook != nil { - in, out := &in.Webhook, &out.Webhook - *out = new(WebhookConfiguration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. -func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { - if in == nil { - return nil - } - out := new(AuthorizerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { - *out = *in - in.Username.DeepCopyInto(&out.Username) - in.Groups.DeepCopyInto(&out.Groups) - out.UID = in.UID - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make([]ExtraMapping, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings. -func (in *ClaimMappings) DeepCopy() *ClaimMappings { - if in == nil { - return nil - } - out := new(ClaimMappings) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression. -func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression { - if in == nil { - return nil - } - out := new(ClaimOrExpression) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule. -func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule { - if in == nil { - return nil - } - out := new(ClaimValidationRule) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Connection) DeepCopyInto(out *Connection) { *out = *in @@ -289,92 +148,6 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. -func (in *ExtraMapping) DeepCopy() *ExtraMapping { - if in == nil { - return nil - } - out := new(ExtraMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Issuer) DeepCopyInto(out *Issuer) { - *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer. -func (in *Issuer) DeepCopy() *Issuer { - if in == nil { - return nil - } - out := new(Issuer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) { - *out = *in - in.Issuer.DeepCopyInto(&out.Issuer) - if in.ClaimValidationRules != nil { - in, out := &in.ClaimValidationRules, &out.ClaimValidationRules - *out = make([]ClaimValidationRule, len(*in)) - copy(*out, *in) - } - in.ClaimMappings.DeepCopyInto(&out.ClaimMappings) - if in.UserValidationRules != nil { - in, out := &in.UserValidationRules, &out.UserValidationRules - *out = make([]UserValidationRule, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator. -func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator { - if in == nil { - return nil - } - out := new(JWTAuthenticator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) { - *out = *in - if in.Prefix != nil { - in, out := &in.Prefix, &out.Prefix - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression. 
-func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression { - if in == nil { - return nil - } - out := new(PrefixedClaimOrExpression) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TCPTransport) DeepCopyInto(out *TCPTransport) { *out = *in @@ -479,81 +252,3 @@ func (in *UDSTransport) DeepCopy() *UDSTransport { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule. -func (in *UserValidationRule) DeepCopy() *UserValidationRule { - if in == nil { - return nil - } - out := new(UserValidationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) { - *out = *in - out.AuthorizedTTL = in.AuthorizedTTL - out.UnauthorizedTTL = in.UnauthorizedTTL - out.Timeout = in.Timeout - in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo) - if in.MatchConditions != nil { - in, out := &in.MatchConditions, &out.MatchConditions - *out = make([]WebhookMatchCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration. -func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration { - if in == nil { - return nil - } - out := new(WebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) { - *out = *in - if in.KubeConfigFile != nil { - in, out := &in.KubeConfigFile, &out.KubeConfigFile - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo. -func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo { - if in == nil { - return nil - } - out := new(WebhookConnectionInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition. -func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { - if in == nil { - return nil - } - out := new(WebhookMatchCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go index 17a398ed8a4..f369b2229b9 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go @@ -235,10 +235,10 @@ type PolicyRule struct { Namespaces []string // NonResourceURLs is a set of URL paths that should be audited. - // `*`s are allowed, but only as the full, final step in the path. + // *s are allowed, but only as the full, final step in the path. 
// Examples: - // `/metrics` - Log requests for apiserver metrics - // `/healthz*` - Log all health checks + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks // +optional NonResourceURLs []string @@ -269,11 +269,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // - `pods` matches pods. - // - `pods/log` matches the log subresource of pods. - // - `*` matches all resources and their subresources. - // - `pods/*` matches all subresources of pods. - // - `*/scale` matches all scale subresources. + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto index 13c41e54ce6..8cdb12cdf96 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -129,11 +129,11 @@ message GroupResources { // Resources is a list of resources this rule applies to. // // For example: - // - `pods` matches pods. - // - `pods/log` matches the log subresource of pods. - // - `*` matches all resources and their subresources. - // - `pods/*` matches all subresources of pods. - // - `*/scale` matches all scale subresources. + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. @@ -248,10 +248,10 @@ message PolicyRule { repeated string namespaces = 6; // NonResourceURLs is a set of URL paths that should be audited. - // `*`s are allowed, but only as the full, final step in the path. + // *s are allowed, but only as the full, final step in the path. // Examples: - // - `/metrics` - Log requests for apiserver metrics - // - `/healthz*` - Log all health checks + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks // +optional repeated string nonResourceURLs = 7; diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go index 151c56c689b..27f4729eaaf 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go @@ -229,10 +229,10 @@ type PolicyRule struct { Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"` // NonResourceURLs is a set of URL paths that should be audited. - // `*`s are allowed, but only as the full, final step in the path. + // *s are allowed, but only as the full, final step in the path. // Examples: - // - `/metrics` - Log requests for apiserver metrics - // - `/healthz*` - Log all health checks + // "/metrics" - Log requests for apiserver metrics + // "/healthz*" - Log all health checks // +optional NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"` @@ -263,11 +263,11 @@ type GroupResources struct { // Resources is a list of resources this rule applies to. // // For example: - // - `pods` matches pods. 
- // - `pods/log` matches the log subresource of pods. - // - `*` matches all resources and their subresources. - // - `pods/*` matches all subresources of pods. - // - `*/scale` matches all scale subresources. + // 'pods' matches pods. + // 'pods/log' matches the log subresource of pods. + // '*' matches all resources and their subresources. + // 'pods/*' matches all subresources of pods. + // '*/scale' matches all scale subresources. // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. diff --git a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go index aca968de643..b037371e3a8 100644 --- a/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go +++ b/vendor/k8s.io/apiserver/pkg/apis/flowcontrol/bootstrap/default.go @@ -19,11 +19,11 @@ package bootstrap import ( coordinationv1 "k8s.io/api/coordination/v1" corev1 "k8s.io/api/core/v1" - flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/utils/ptr" + "k8s.io/utils/pointer" ) // The objects that define an apiserver's initial behavior. The @@ -90,8 +90,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementExempt, Exempt: &flowcontrol.ExemptPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(0)), - LendablePercent: ptr.To(int32(0)), + NominalConcurrencyShares: pointer.Int32(0), + LendablePercent: pointer.Int32(0), }, }, ) @@ -100,8 +100,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(5)), - LendablePercent: ptr.To(int32(0)), + NominalConcurrencyShares: 5, + LendablePercent: pointer.Int32(0), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeReject, }, @@ -173,8 +173,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(30)), - LendablePercent: ptr.To(int32(33)), + NominalConcurrencyShares: 30, + LendablePercent: pointer.Int32(33), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -190,8 +190,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(40)), - LendablePercent: ptr.To(int32(25)), + NominalConcurrencyShares: 40, + LendablePercent: pointer.Int32(25), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -208,8 +208,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(10)), - LendablePercent: ptr.To(int32(0)), + NominalConcurrencyShares: 10, + LendablePercent: pointer.Int32(0), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -226,8 +226,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: 
flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(40)), - LendablePercent: ptr.To(int32(50)), + NominalConcurrencyShares: 40, + LendablePercent: pointer.Int32(50), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -244,8 +244,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(100)), - LendablePercent: ptr.To(int32(90)), + NominalConcurrencyShares: 100, + LendablePercent: pointer.Int32(90), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ @@ -262,8 +262,8 @@ var ( flowcontrol.PriorityLevelConfigurationSpec{ Type: flowcontrol.PriorityLevelEnablementLimited, Limited: &flowcontrol.LimitedPriorityLevelConfiguration{ - NominalConcurrencyShares: ptr.To(int32(20)), - LendablePercent: ptr.To(int32(50)), + NominalConcurrencyShares: 20, + LendablePercent: pointer.Int32(50), LimitResponse: flowcontrol.LimitResponse{ Type: flowcontrol.LimitResponseTypeQueue, Queuing: &flowcontrol.QueuingConfiguration{ diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go deleted file mode 100644 index 3bcff5e9051..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "fmt" - - "github.com/google/cel-go/cel" - - "k8s.io/apimachinery/pkg/util/version" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" -) - -const ( - claimsVarName = "claims" - userVarName = "user" -) - -// compiler implements the Compiler interface. -type compiler struct { - // varEnvs is a map of CEL environments, keyed by the name of the CEL variable. - // The CEL variable is available to the expression. - // We have 2 environments, one for claims and one for user. - varEnvs map[string]*environment.EnvSet -} - -// NewCompiler returns a new Compiler. -func NewCompiler(env *environment.EnvSet) Compiler { - return &compiler{ - varEnvs: mustBuildEnvs(env), - } -} - -// CompileClaimsExpression compiles the given expressionAccessor into a CEL program that can be evaluated. -// The claims CEL variable is available to the expression. -func (c compiler) CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { - return c.compile(expressionAccessor, claimsVarName) -} - -// CompileUserExpression compiles the given expressionAccessor into a CEL program that can be evaluated. -// The user CEL variable is available to the expression. 
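// A minimal sketch (not from the vendored sources) of the pointer-helper swap in the
// flowcontrol bootstrap hunks above: with the v1beta3 types restored here,
// Limited.NominalConcurrencyShares is a plain int32 while LendablePercent stays *int32,
// so the generic ptr.To helper gives way to the typed k8s.io/utils/pointer helpers.
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
	"k8s.io/utils/ptr"
)

func main() {
	a := ptr.To(int32(25))  // generic helper used with the flowcontrol/v1 types being reverted
	b := pointer.Int32(25)  // typed helper used with the v1beta3 types in the hunks above
	fmt.Println(*a == *b)   // true: both yield an *int32 pointing at 25
}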
-func (c compiler) CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { - return c.compile(expressionAccessor, userVarName) -} - -func (c compiler) compile(expressionAccessor ExpressionAccessor, envVarName string) (CompilationResult, error) { - resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { - return CompilationResult{}, &apiservercel.Error{ - Type: errType, - Detail: errorString, - } - } - - env, err := c.varEnvs[envVarName].Env(environment.StoredExpressions) - if err != nil { - return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) - } - - ast, issues := env.Compile(expressionAccessor.GetExpression()) - if issues != nil { - return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) - } - - found := false - returnTypes := expressionAccessor.ReturnTypes() - for _, returnType := range returnTypes { - if ast.OutputType() == returnType || cel.AnyType == returnType { - found = true - break - } - } - if !found { - var reason string - if len(returnTypes) == 1 { - reason = fmt.Sprintf("must evaluate to %v", returnTypes[0].String()) - } else { - reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) - } - - return resultError(reason, apiservercel.ErrorTypeInvalid) - } - - if _, err = cel.AstToCheckedExpr(ast); err != nil { - // should be impossible since env.Compile returned no issues - return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) - } - prog, err := env.Program(ast) - if err != nil { - return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) - } - - return CompilationResult{ - Program: prog, - ExpressionAccessor: expressionAccessor, - }, nil -} - -func buildUserType() *apiservercel.DeclType { - field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { - return apiservercel.NewDeclField(name, declType, required, nil, nil) - } - fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { - result := make(map[string]*apiservercel.DeclField, len(fields)) - for _, f := range fields { - result[f.Name] = f - } - return result - } - - return apiservercel.NewObjectType("kubernetes.UserInfo", fields( - field("username", apiservercel.StringType, false), - field("uid", apiservercel.StringType, false), - field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), - field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), - )) -} - -func mustBuildEnvs(baseEnv *environment.EnvSet) map[string]*environment.EnvSet { - buildEnvSet := func(envOpts []cel.EnvOption, declTypes []*apiservercel.DeclType) *environment.EnvSet { - env, err := baseEnv.Extend(environment.VersionedOptions{ - IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: envOpts, - DeclTypes: declTypes, - }) - if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) - } - return env - } - - userType := buildUserType() - claimsType := apiservercel.NewMapType(apiservercel.StringType, apiservercel.AnyType, -1) - - envs := make(map[string]*environment.EnvSet, 2) // build two environments, one for claims and one for user - envs[claimsVarName] = buildEnvSet([]cel.EnvOption{cel.Variable(claimsVarName, claimsType.CelType())}, []*apiservercel.DeclType{claimsType}) - envs[userVarName] = 
buildEnvSet([]cel.EnvOption{cel.Variable(userVarName, userType.CelType())}, []*apiservercel.DeclType{userType}) - - return envs -} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go deleted file mode 100644 index 7ec0c9af6af..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cel contains the CEL related interfaces and structs for authentication. -package cel - -import ( - "context" - - celgo "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types/ref" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -// ExpressionAccessor is an interface that provides access to a CEL expression. -type ExpressionAccessor interface { - GetExpression() string - ReturnTypes() []*celgo.Type -} - -// CompilationResult represents a compiled validations expression. -type CompilationResult struct { - Program celgo.Program - ExpressionAccessor ExpressionAccessor -} - -// EvaluationResult contains the minimal required fields and metadata of a cel evaluation -type EvaluationResult struct { - EvalResult ref.Val - ExpressionAccessor ExpressionAccessor -} - -// Compiler provides a CEL expression compiler configured with the desired authentication related CEL variables. -type Compiler interface { - CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) - CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) -} - -// ClaimsMapper provides a CEL expression mapper configured with the claims CEL variable. -type ClaimsMapper interface { - // EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. - // This is used for username, groups and uid claim mapping that contains a single expression. - EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) - // EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. - // This is used for extra claim mapping and claim validation that contains a list of expressions. - EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) -} - -// UserMapper provides a CEL expression mapper configured with the user CEL variable. -type UserMapper interface { - // EvalUser evaluates the given user expressions and returns a list of EvaluationResult. - // This is used for user validation that contains a list of expressions. - EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) -} - -var _ ExpressionAccessor = &ClaimMappingExpression{} - -// ClaimMappingExpression is a CEL expression that maps a claim. -type ClaimMappingExpression struct { - Expression string -} - -// GetExpression returns the CEL expression. 
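// A reduced sketch (the claims variable declaration and the expression are invented) of
// the compile-check-evaluate flow that the deleted authentication/cel compiler and mapper
// implement: declare the claims variable, compile the expression, build a program and
// evaluate it against the token claims.
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// The deleted compiler builds its environment through an EnvSet with version gating;
	// here the claims variable is declared directly for brevity.
	env, err := cel.NewEnv(cel.Variable("claims", cel.MapType(cel.StringType, cel.DynType)))
	if err != nil {
		panic(err)
	}

	// Hypothetical claim validation expression.
	ast, iss := env.Compile(`has(claims.email) && claims.email.endsWith("@example.com")`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}

	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}

	out, _, err := prg.Eval(map[string]interface{}{
		"claims": map[string]interface{}{"email": "jane@example.com"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Value()) // true
}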
-func (v *ClaimMappingExpression) GetExpression() string { - return v.Expression -} - -// ReturnTypes returns the CEL expression return types. -func (v *ClaimMappingExpression) ReturnTypes() []*celgo.Type { - // return types is only used for validation. The claims variable that's available - // to the claim mapping expressions is a map[string]interface{}, so we can't - // really know what the return type is during compilation. Strict type checking - // is done during evaluation. - return []*celgo.Type{celgo.AnyType} -} - -var _ ExpressionAccessor = &ClaimValidationCondition{} - -// ClaimValidationCondition is a CEL expression that validates a claim. -type ClaimValidationCondition struct { - Expression string - Message string -} - -// GetExpression returns the CEL expression. -func (v *ClaimValidationCondition) GetExpression() string { - return v.Expression -} - -// ReturnTypes returns the CEL expression return types. -func (v *ClaimValidationCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.BoolType} -} - -var _ ExpressionAccessor = &ExtraMappingExpression{} - -// ExtraMappingExpression is a CEL expression that maps an extra to a list of values. -type ExtraMappingExpression struct { - Key string - Expression string -} - -// GetExpression returns the CEL expression. -func (v *ExtraMappingExpression) GetExpression() string { - return v.Expression -} - -// ReturnTypes returns the CEL expression return types. -func (v *ExtraMappingExpression) ReturnTypes() []*celgo.Type { - // return types is only used for validation. The claims variable that's available - // to the claim mapping expressions is a map[string]interface{}, so we can't - // really know what the return type is during compilation. Strict type checking - // is done during evaluation. - return []*celgo.Type{celgo.AnyType} -} - -var _ ExpressionAccessor = &UserValidationCondition{} - -// UserValidationCondition is a CEL expression that validates a User. -type UserValidationCondition struct { - Expression string - Message string -} - -// GetExpression returns the CEL expression. -func (v *UserValidationCondition) GetExpression() string { - return v.Expression -} - -// ReturnTypes returns the CEL expression return types. -func (v *UserValidationCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.BoolType} -} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go deleted file mode 100644 index ab308bb7f0f..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "fmt" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" -) - -var _ ClaimsMapper = &mapper{} -var _ UserMapper = &mapper{} - -// mapper implements the ClaimsMapper and UserMapper interface. 
-type mapper struct { - compilationResults []CompilationResult -} - -// CELMapper is a struct that holds the compiled expressions for -// username, groups, uid, extra, claimValidation and userValidation -type CELMapper struct { - Username ClaimsMapper - Groups ClaimsMapper - UID ClaimsMapper - Extra ClaimsMapper - ClaimValidationRules ClaimsMapper - UserValidationRules UserMapper -} - -// NewClaimsMapper returns a new ClaimsMapper. -func NewClaimsMapper(compilationResults []CompilationResult) ClaimsMapper { - return &mapper{ - compilationResults: compilationResults, - } -} - -// NewUserMapper returns a new UserMapper. -func NewUserMapper(compilationResults []CompilationResult) UserMapper { - return &mapper{ - compilationResults: compilationResults, - } -} - -// EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. -func (m *mapper) EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) { - results, err := m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) - if err != nil { - return EvaluationResult{}, err - } - if len(results) != 1 { - return EvaluationResult{}, fmt.Errorf("expected 1 evaluation result, got %d", len(results)) - } - return results[0], nil -} - -// EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. -func (m *mapper) EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) { - return m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) -} - -// EvalUser evaluates the given user expressions and returns a list of EvaluationResult. -func (m *mapper) EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) { - return m.eval(ctx, map[string]interface{}{userVarName: userInfo.Object}) -} - -func (m *mapper) eval(ctx context.Context, input map[string]interface{}) ([]EvaluationResult, error) { - evaluations := make([]EvaluationResult, len(m.compilationResults)) - - for i, compilationResult := range m.compilationResults { - var evaluation = &evaluations[i] - evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor - - evalResult, _, err := compilationResult.Program.ContextEval(ctx, input) - if err != nil { - return nil, fmt.Errorf("expression '%s' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err) - } - - evaluation.EvalResult = evalResult - } - - return evaluations, nil -} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go index d67c5354763..63010aadc2c 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -148,33 +148,6 @@ func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.R } } - /* - kubernetes mutual (2-way) x509 between client and apiserver: - - 1. apiserver sending its apiserver certificate along with its publickey to client - 2. client verifies the apiserver certificate sent against its cluster certificate authority data - 3. client sending its client certificate along with its public key to the apiserver - >4. 
apiserver verifies the client certificate sent against its cluster certificate authority data - - description: - here, with this function, - client certificate and pub key sent during the handshake process - are verified by apiserver against its cluster certificate authority data - - normal args related to this stage: - --client-ca-file string If set, any request presenting a client certificate signed by - one of the authorities in the client-ca-file is authenticated with an identity - corresponding to the CommonName of the client certificate. - - (retrievable from "kube-apiserver --help" command) - (suggested by @deads2k) - - see also: - - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go - - for the step 2, see: staging/src/k8s.io/client-go/transport/transport.go - - for the step 3, see: staging/src/k8s.io/client-go/transport/transport.go - */ - remaining := req.TLS.PeerCertificates[0].NotAfter.Sub(time.Now()) clientCertificateExpirationHistogram.WithContext(req.Context()).Observe(remaining.Seconds()) chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go index c55fe5d2ed6..f0dc0767639 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -36,21 +36,12 @@ const ( ServiceAccountUsernameSeparator = ":" ServiceAccountGroupPrefix = "system:serviceaccounts:" AllServiceAccountsGroup = "system:serviceaccounts" - // CredentialIDKey is the key used in a user's "extra" to specify the unique - // identifier for this identity document). - CredentialIDKey = "authentication.kubernetes.io/credential-id" // PodNameKey is the key used in a user's "extra" to specify the pod name of // the authenticating request. PodNameKey = "authentication.kubernetes.io/pod-name" // PodUIDKey is the key used in a user's "extra" to specify the pod UID of // the authenticating request. PodUIDKey = "authentication.kubernetes.io/pod-uid" - // NodeNameKey is the key used in a user's "extra" to specify the node name of - // the authenticating request. - NodeNameKey = "authentication.kubernetes.io/node-name" - // NodeUIDKey is the key used in a user's "extra" to specify the node UID of - // the authenticating request. - NodeUIDKey = "authentication.kubernetes.io/node-uid" ) // MakeUsername generates a username from the given namespace and ServiceAccount name. 
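// The x509.go hunk above drops the comment walking through Kubernetes mutual TLS; the
// step it described (the apiserver checking the presented client chain against the
// --client-ca-file bundle) reduces to a crypto/x509 Verify call. A sketch, with a
// hypothetical CA path and the client-auth key usage chosen for illustration:
package clientcertsketch

import (
	"crypto/x509"
	"fmt"
	"os"
)

// verifyClientCert is roughly what the authenticator does with req.TLS.PeerCertificates[0].
func verifyClientCert(clientCert *x509.Certificate, caPEMPath string) error {
	caPEM, err := os.ReadFile(caPEMPath) // e.g. the file passed via --client-ca-file
	if err != nil {
		return err
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caPEM) {
		return fmt.Errorf("no CA certificates parsed from %s", caPEMPath)
	}
	_, err = clientCert.Verify(x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	return err
}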
@@ -128,8 +119,6 @@ func UserInfo(namespace, name, uid string) user.Info { type ServiceAccountInfo struct { Name, Namespace, UID string PodName, PodUID string - CredentialID string - NodeName, NodeUID string } func (sa *ServiceAccountInfo) UserInfo() user.Info { @@ -138,43 +127,15 @@ func (sa *ServiceAccountInfo) UserInfo() user.Info { UID: sa.UID, Groups: MakeGroupNames(sa.Namespace), } - if sa.PodName != "" && sa.PodUID != "" { - if info.Extra == nil { - info.Extra = make(map[string][]string) - } - info.Extra[PodNameKey] = []string{sa.PodName} - info.Extra[PodUIDKey] = []string{sa.PodUID} - } - if sa.CredentialID != "" { - if info.Extra == nil { - info.Extra = make(map[string][]string) - } - info.Extra[CredentialIDKey] = []string{sa.CredentialID} - } - if sa.NodeName != "" { - if info.Extra == nil { - info.Extra = make(map[string][]string) - } - info.Extra[NodeNameKey] = []string{sa.NodeName} - // node UID is optional and will only be set if the node name is set - if sa.NodeUID != "" { - info.Extra[NodeUIDKey] = []string{sa.NodeUID} + info.Extra = map[string][]string{ + PodNameKey: {sa.PodName}, + PodUIDKey: {sa.PodUID}, } } - return info } -// CredentialIDForJTI converts a given JTI string into a credential identifier for use in a -// users 'extra' info. -func CredentialIDForJTI(jti string) string { - if len(jti) == 0 { - return "" - } - return "JTI=" + jti -} - // IsServiceAccountToken returns true if the secret is a valid api token for the service account func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { if secret.Type != v1.SecretTypeServiceAccountToken { diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go index a8355ee6191..d1ead25dbb2 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go @@ -54,7 +54,6 @@ func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) { c.AllowCacheTTL, c.DenyCacheTTL, *c.WebhookRetryBackoff, - authorizer.DecisionNoOpinion, webhook.AuthorizerMetrics{ RecordRequestTotal: RecordRequestTotal, RecordRequestLatency: RecordRequestLatency, diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go deleted file mode 100644 index 0d9293dd704..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
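// A sketch (namespace, names and UIDs are invented) of the user.Info shape produced by
// the reverted ServiceAccountInfo.UserInfo above: pod extras are attached only when both
// the pod name and pod UID are known, and the credential-id / node keys are gone.
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/apiserver/pkg/authentication/user"
)

func main() {
	info := &user.DefaultInfo{
		Name:   serviceaccount.MakeUsername("keda", "keda-operator"),
		Groups: serviceaccount.MakeGroupNames("keda"),
		Extra: map[string][]string{
			serviceaccount.PodNameKey: {"keda-operator-abc123"},
			serviceaccount.PodUIDKey:  {"0b1c2d3e-f456-7890-abcd-ef0123456789"},
		},
	}
	fmt.Println(info.GetName(), info.GetExtra())
}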
-*/ - -package cel - -import ( - "fmt" - - "github.com/google/cel-go/cel" - "github.com/google/cel-go/common/types/ref" - - authorizationv1 "k8s.io/api/authorization/v1" - "k8s.io/apimachinery/pkg/util/version" - apiservercel "k8s.io/apiserver/pkg/cel" - "k8s.io/apiserver/pkg/cel/environment" -) - -const ( - subjectAccessReviewRequestVarName = "request" -) - -// CompilationResult represents a compiled authorization cel expression. -type CompilationResult struct { - Program cel.Program - ExpressionAccessor ExpressionAccessor -} - -// EvaluationResult contains the minimal required fields and metadata of a cel evaluation -type EvaluationResult struct { - EvalResult ref.Val - ExpressionAccessor ExpressionAccessor -} - -// Compiler is an interface for compiling CEL expressions with the desired environment mode. -type Compiler interface { - CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) -} - -type compiler struct { - envSet *environment.EnvSet -} - -// NewCompiler returns a new Compiler. -func NewCompiler(env *environment.EnvSet) Compiler { - return &compiler{ - envSet: mustBuildEnv(env), - } -} - -func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { - resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { - err := &apiservercel.Error{ - Type: errType, - Detail: errorString, - } - return CompilationResult{ - ExpressionAccessor: expressionAccessor, - }, err - } - env, err := c.envSet.Env(environment.StoredExpressions) - if err != nil { - return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) - } - ast, issues := env.Compile(expressionAccessor.GetExpression()) - if issues != nil { - return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) - } - found := false - returnTypes := expressionAccessor.ReturnTypes() - for _, returnType := range returnTypes { - if ast.OutputType() == returnType { - found = true - break - } - } - if !found { - var reason string - if len(returnTypes) == 1 { - reason = fmt.Sprintf("must evaluate to %v but got %v", returnTypes[0].String(), ast.OutputType()) - } else { - reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) - } - - return resultError(reason, apiservercel.ErrorTypeInvalid) - } - _, err = cel.AstToCheckedExpr(ast) - if err != nil { - // should be impossible since env.Compile returned no issues - return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) - } - prog, err := env.Program(ast) - if err != nil { - return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) - } - return CompilationResult{ - Program: prog, - ExpressionAccessor: expressionAccessor, - }, nil -} - -func mustBuildEnv(baseEnv *environment.EnvSet) *environment.EnvSet { - field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { - return apiservercel.NewDeclField(name, declType, required, nil, nil) - } - fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { - result := make(map[string]*apiservercel.DeclField, len(fields)) - for _, f := range fields { - result[f.Name] = f - } - return result - } - subjectAccessReviewSpecRequestType := buildRequestType(field, fields) - extended, err := baseEnv.Extend( - environment.VersionedOptions{ - // we record this as 1.0 since it was available in the - // first version 
that supported this feature - IntroducedVersion: version.MajorMinor(1, 0), - EnvOptions: []cel.EnvOption{ - cel.Variable(subjectAccessReviewRequestVarName, subjectAccessReviewSpecRequestType.CelType()), - }, - DeclTypes: []*apiservercel.DeclType{ - subjectAccessReviewSpecRequestType, - }, - }, - ) - if err != nil { - panic(fmt.Sprintf("environment misconfigured: %v", err)) - } - - return extended -} - -// buildRequestType generates a DeclType for SubjectAccessReviewSpec. -// if attributes are added here, also add to convertObjectToUnstructured. -func buildRequestType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { - resourceAttributesType := buildResourceAttributesType(field, fields) - nonResourceAttributesType := buildNonResourceAttributesType(field, fields) - return apiservercel.NewObjectType("kubernetes.SubjectAccessReviewSpec", fields( - field("resourceAttributes", resourceAttributesType, false), - field("nonResourceAttributes", nonResourceAttributesType, false), - field("user", apiservercel.StringType, false), - field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), - field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), - field("uid", apiservercel.StringType, false), - )) -} - -// buildResourceAttributesType generates a DeclType for ResourceAttributes. -// if attributes are added here, also add to convertObjectToUnstructured. -func buildResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { - return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields( - field("namespace", apiservercel.StringType, false), - field("verb", apiservercel.StringType, false), - field("group", apiservercel.StringType, false), - field("version", apiservercel.StringType, false), - field("resource", apiservercel.StringType, false), - field("subresource", apiservercel.StringType, false), - field("name", apiservercel.StringType, false), - )) -} - -// buildNonResourceAttributesType generates a DeclType for NonResourceAttributes. -// if attributes are added here, also add to convertObjectToUnstructured. 
-func buildNonResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { - return apiservercel.NewObjectType("kubernetes.NonResourceAttributes", fields( - field("path", apiservercel.StringType, false), - field("verb", apiservercel.StringType, false), - )) -} - -func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec) map[string]interface{} { - // Construct version containing every SubjectAccessReview user and string attribute field, even omitempty ones, for evaluation by CEL - extra := obj.Extra - if extra == nil { - extra = map[string]authorizationv1.ExtraValue{} - } - ret := map[string]interface{}{ - "user": obj.User, - "groups": obj.Groups, - "uid": string(obj.UID), - "extra": extra, - } - if obj.ResourceAttributes != nil { - ret["resourceAttributes"] = map[string]string{ - "namespace": obj.ResourceAttributes.Namespace, - "verb": obj.ResourceAttributes.Verb, - "group": obj.ResourceAttributes.Group, - "version": obj.ResourceAttributes.Version, - "resource": obj.ResourceAttributes.Resource, - "subresource": obj.ResourceAttributes.Subresource, - "name": obj.ResourceAttributes.Name, - } - } - if obj.NonResourceAttributes != nil { - ret["nonResourceAttributes"] = map[string]string{ - "verb": obj.NonResourceAttributes.Verb, - "path": obj.NonResourceAttributes.Path, - } - } - return ret -} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go deleted file mode 100644 index 82166830c87..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - celgo "github.com/google/cel-go/cel" -) - -type ExpressionAccessor interface { - GetExpression() string - ReturnTypes() []*celgo.Type -} - -var _ ExpressionAccessor = &SubjectAccessReviewMatchCondition{} - -// SubjectAccessReviewMatchCondition is a CEL expression that maps a SubjectAccessReview request to a list of values. -type SubjectAccessReviewMatchCondition struct { - Expression string -} - -func (v *SubjectAccessReviewMatchCondition) GetExpression() string { - return v.Expression -} - -func (v *SubjectAccessReviewMatchCondition) ReturnTypes() []*celgo.Type { - return []*celgo.Type{celgo.BoolType} -} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go deleted file mode 100644 index 30ce5b69c99..00000000000 --- a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cel - -import ( - "context" - "fmt" - - celgo "github.com/google/cel-go/cel" - - authorizationv1 "k8s.io/api/authorization/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -type CELMatcher struct { - CompilationResults []CompilationResult -} - -// eval evaluates the given SubjectAccessReview against all cel matchCondition expression -func (c *CELMatcher) Eval(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) { - var evalErrors []error - va := map[string]interface{}{ - "request": convertObjectToUnstructured(&r.Spec), - } - for _, compilationResult := range c.CompilationResults { - evalResult, _, err := compilationResult.Program.ContextEval(ctx, va) - if err != nil { - evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err)) - continue - } - if evalResult.Type() != celgo.BoolType { - evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result type should be bool but got %W", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Type())) - continue - } - match, ok := evalResult.Value().(bool) - if !ok { - evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result value should be bool but got %W", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Value())) - continue - } - // If at least one matchCondition successfully evaluates to FALSE, - // return early - if !match { - return false, nil - } - } - // if there is any error, return - if len(evalErrors) > 0 { - return false, utilerrors.NewAggregate(evalErrors) - } - // return ALL matchConditions evaluate to TRUE successfully without error - return true, nil -} diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go b/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go index dd94e282f47..c28d6ce510a 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go @@ -56,27 +56,12 @@ type Schema interface { // Validations contains OpenAPI validation that the CEL library uses. type Validations interface { - Pattern() string - Minimum() *float64 - IsExclusiveMinimum() bool - Maximum() *float64 - IsExclusiveMaximum() bool - MultipleOf() *float64 - MinItems() *int64 MaxItems() *int64 - MinLength() *int64 MaxLength() *int64 - MinProperties() *int64 MaxProperties() *int64 Required() []string Enum() []any Nullable() bool - UniqueItems() bool - - AllOf() []Schema - OneOf() []Schema - AnyOf() []Schema - Not() Schema } // KubeExtensions contains Kubernetes-specific extensions to the OpenAPI schema. @@ -86,16 +71,6 @@ type KubeExtensions interface { IsXPreserveUnknownFields() bool XListType() string XListMapKeys() []string - XMapType() string - XValidations() []ValidationRule -} - -// ValidationRule represents a single x-kubernetes-validations rule. 
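// The deleted matcher.go above evaluates every compiled matchCondition, returns false as
// soon as one condition is false, and only surfaces aggregated errors otherwise. A plain-Go
// sketch of that rule (condition funcs stand in for compiled CEL programs, errors.Join for
// utilerrors.NewAggregate):
package main

import (
	"errors"
	"fmt"
)

func allConditionsMatch(conds []func() (bool, error)) (bool, error) {
	var errs []error
	for _, cond := range conds {
		match, err := cond()
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if !match {
			return false, nil // a single false condition decides the outcome
		}
	}
	if len(errs) > 0 {
		return false, errors.Join(errs...)
	}
	return true, nil // every condition evaluated to true without error
}

func main() {
	ok, err := allConditionsMatch([]func() (bool, error){
		func() (bool, error) { return true, nil },
		func() (bool, error) { return false, nil }, // short-circuits here
	})
	fmt.Println(ok, err) // false <nil>
}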
-type ValidationRule interface { - Rule() string - Message() string - MessageExpression() string - FieldPath() string } // SchemaOrBool contains either a schema or a boolean indicating if the object diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/equality.go b/vendor/k8s.io/apiserver/pkg/cel/common/equality.go deleted file mode 100644 index 9289637a395..00000000000 --- a/vendor/k8s.io/apiserver/pkg/cel/common/equality.go +++ /dev/null @@ -1,334 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "reflect" - "time" -) - -// CorrelatedObject represents a node in a tree of objects that are being -// validated. It is used to keep track of the old value of an object during -// traversal of the new value. It is also used to cache the results of -// DeepEqual comparisons between the old and new values of objects. -// -// All receiver functions support being called on `nil` to support ergonomic -// recursive descent. The nil `CorrelatedObject` represents an uncorrelatable -// node in the tree. -// -// CorrelatedObject is not thread-safe. It is the responsibility of the caller -// to handle concurrency, if any. -type CorrelatedObject struct { - // Currently correlated old value during traversal of the schema/object - OldValue interface{} - - // Value being validated - Value interface{} - - // Schema used for validation of this value. The schema is also used - // to determine how to correlate the old object. - Schema Schema - - // Duration spent on ratcheting validation for this object and all of its - // children. - Duration *time.Duration - - // Scratch space below, may change during validation - - // Cached comparison result of DeepEqual of `value` and `thunk.oldValue` - comparisonResult *bool - - // Cached map representation of a map-type list, or nil if not map-type list - mapList MapList - - // Children spawned by a call to `Validate` on this object - // key is either a string or an index, depending upon whether `value` is - // a map or a list, respectively. - // - // The list of children may be incomplete depending upon if the internal - // logic of kube-openapi's SchemaValidator short-circuited before - // reaching all of the children. - // - // It should be expected to have an entry for either all of the children, or - // none of them. 
- children map[interface{}]*CorrelatedObject -} - -func NewCorrelatedObject(new, old interface{}, schema Schema) *CorrelatedObject { - d := time.Duration(0) - return &CorrelatedObject{ - OldValue: old, - Value: new, - Schema: schema, - Duration: &d, - } -} - -// If OldValue or Value is not a list, or the index is out of bounds of the -// Value list, returns nil -// If oldValue is a list, this considers the x-list-type to decide how to -// correlate old values: -// -// If listType is map, creates a map representation of the list using the designated -// map-keys, caches it for future calls, and returns the map value, or nil if -// the correlated key is not in the old map -// -// Otherwise, if the list type is not correlatable this funcion returns nil. -func (r *CorrelatedObject) correlateOldValueForChildAtNewIndex(index int) interface{} { - oldAsList, ok := r.OldValue.([]interface{}) - if !ok { - return nil - } - - asList, ok := r.Value.([]interface{}) - if !ok { - return nil - } else if len(asList) <= index { - // Cannot correlate out of bounds index - return nil - } - - listType := r.Schema.XListType() - switch listType { - case "map": - // Look up keys for this index in current object - currentElement := asList[index] - - oldList := r.mapList - if oldList == nil { - oldList = MakeMapList(r.Schema, oldAsList) - r.mapList = oldList - } - return oldList.Get(currentElement) - - case "set": - // Are sets correlatable? Only if the old value equals the current value. - // We might be able to support this, but do not currently see a lot - // of value - // (would allow you to add/remove items from sets with ratcheting but not change them) - return nil - case "": - fallthrough - case "atomic": - // Atomic lists are the default are not correlatable by item - // Ratcheting is not available on a per-index basis - return nil - default: - // Unrecognized list type. Assume non-correlatable. - return nil - } -} - -// CachedDeepEqual is equivalent to reflect.DeepEqual, but caches the -// results in the tree of ratchetInvocationScratch objects on the way: -// -// For objects and arrays, this function will make a best effort to make -// use of past DeepEqual checks performed by this Node's children, if available. -// -// If a lazy computation could not be found for all children possibly due -// to validation logic short circuiting and skipping the children, then -// this function simply defers to reflect.DeepEqual. -func (r *CorrelatedObject) CachedDeepEqual() (res bool) { - start := time.Now() - defer func() { - if r != nil && r.Duration != nil { - *r.Duration += time.Since(start) - } - }() - - if r == nil { - // Uncorrelatable node is not considered equal to its old value - return false - } else if r.comparisonResult != nil { - return *r.comparisonResult - } - - defer func() { - r.comparisonResult = &res - }() - - if r.Value == nil && r.OldValue == nil { - return true - } else if r.Value == nil || r.OldValue == nil { - return false - } - - oldAsArray, oldIsArray := r.OldValue.([]interface{}) - newAsArray, newIsArray := r.Value.([]interface{}) - - oldAsMap, oldIsMap := r.OldValue.(map[string]interface{}) - newAsMap, newIsMap := r.Value.(map[string]interface{}) - - // If old and new are not the same type, they are not equal - if (oldIsArray != newIsArray) || oldIsMap != newIsMap { - return false - } - - // Objects are known to be same type of (map, slice, or primitive) - switch { - case oldIsArray: - // Both arrays case. 
oldIsArray == newIsArray - if len(oldAsArray) != len(newAsArray) { - return false - } - - for i := range newAsArray { - child := r.Index(i) - if child == nil { - if r.mapList == nil { - // Treat non-correlatable array as a unit with reflect.DeepEqual - return reflect.DeepEqual(oldAsArray, newAsArray) - } - - // If array is correlatable, but old not found. Just short circuit - // comparison - return false - - } else if !child.CachedDeepEqual() { - // If one child is not equal the entire object is not equal - return false - } - } - - return true - case oldIsMap: - // Both maps case. oldIsMap == newIsMap - if len(oldAsMap) != len(newAsMap) { - return false - } - - for k := range newAsMap { - child := r.Key(k) - if child == nil { - // Un-correlatable child due to key change. - // Objects are not equal. - return false - } else if !child.CachedDeepEqual() { - // If one child is not equal the entire object is not equal - return false - } - } - - return true - - default: - // Primitive: use reflect.DeepEqual - return reflect.DeepEqual(r.OldValue, r.Value) - } -} - -// Key returns the child of the receiver with the given name. -// Returns nil if the given name is does not exist in the new object, or its -// value is not correlatable to an old value. -// If receiver is nil or if the new value is not an object/map, returns nil. -func (r *CorrelatedObject) Key(field string) *CorrelatedObject { - start := time.Now() - defer func() { - if r != nil && r.Duration != nil { - *r.Duration += time.Since(start) - } - }() - - if r == nil || r.Schema == nil { - return nil - } else if existing, exists := r.children[field]; exists { - return existing - } - - // Find correlated old value - oldAsMap, okOld := r.OldValue.(map[string]interface{}) - newAsMap, okNew := r.Value.(map[string]interface{}) - if !okOld || !okNew { - return nil - } - - oldValueForField, okOld := oldAsMap[field] - newValueForField, okNew := newAsMap[field] - if !okOld || !okNew { - return nil - } - - var propertySchema Schema - if prop, exists := r.Schema.Properties()[field]; exists { - propertySchema = prop - } else if addP := r.Schema.AdditionalProperties(); addP != nil && addP.Schema() != nil { - propertySchema = addP.Schema() - } else { - return nil - } - - if r.children == nil { - r.children = make(map[interface{}]*CorrelatedObject, len(newAsMap)) - } - - res := &CorrelatedObject{ - OldValue: oldValueForField, - Value: newValueForField, - Schema: propertySchema, - Duration: r.Duration, - } - r.children[field] = res - return res -} - -// Index returns the child of the receiver at the given index. -// Returns nil if the given index is out of bounds, or its value is not -// correlatable to an old value. -// If receiver is nil or if the new value is not an array, returns nil. 
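// correlateOldValueForChildAtNewIndex in the deleted equality.go only correlates list
// items when x-kubernetes-list-type is "map", keying the old list by the declared map
// keys. A reduced sketch for a single hypothetical "name" key, with invented elements:
package main

import "fmt"

func correlateByName(oldList []map[string]interface{}, newElem map[string]interface{}) map[string]interface{} {
	index := make(map[interface{}]map[string]interface{}, len(oldList))
	for _, el := range oldList {
		index[el["name"]] = el
	}
	return index[newElem["name"]] // nil when the element is new or uncorrelatable
}

func main() {
	old := []map[string]interface{}{
		{"name": "http", "port": 80},
		{"name": "https", "port": 443},
	}
	fmt.Println(correlateByName(old, map[string]interface{}{"name": "https", "port": 8443}))
	// map[name:https port:443] - the old element is found by key, regardless of index
}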
-func (r *CorrelatedObject) Index(i int) *CorrelatedObject { - start := time.Now() - defer func() { - if r != nil && r.Duration != nil { - *r.Duration += time.Since(start) - } - }() - - if r == nil || r.Schema == nil { - return nil - } else if existing, exists := r.children[i]; exists { - return existing - } - - asList, ok := r.Value.([]interface{}) - if !ok || len(asList) <= i { - return nil - } - - oldValueForIndex := r.correlateOldValueForChildAtNewIndex(i) - if oldValueForIndex == nil { - return nil - } - var itemSchema Schema - if i := r.Schema.Items(); i != nil { - itemSchema = i - } else { - return nil - } - - if r.children == nil { - r.children = make(map[interface{}]*CorrelatedObject, len(asList)) - } - - res := &CorrelatedObject{ - OldValue: oldValueForIndex, - Value: asList[i], - Schema: itemSchema, - Duration: r.Duration, - } - r.children[i] = res - return res -} diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go index 19392babeb2..3fdd3a6c8ba 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go @@ -165,11 +165,7 @@ func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType { // unicode code point can be up to 4 bytes long) strWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength()) * 4 } else { - if len(s.Enum()) > 0 { - strWithMaxLength.MaxElements = estimateMaxStringEnumLength(s) - } else { - strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) - } + strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s) } return strWithMaxLength case "boolean": @@ -243,19 +239,6 @@ func estimateMaxStringLengthPerRequest(s Schema) int64 { } } -// estimateMaxStringLengthPerRequest estimates the maximum string length (in characters) -// that has a set of enum values. -// The result of the estimation is the length of the longest possible value. -func estimateMaxStringEnumLength(s Schema) int64 { - var maxLength int64 - for _, v := range s.Enum() { - if s, ok := v.(string); ok && int64(len(s)) > maxLength { - maxLength = int64(len(s)) - } - } - return maxLength -} - // estimateMaxArrayItemsPerRequest estimates the maximum number of array items with // the provided minimum serialized size that can fit into a single request. func estimateMaxArrayItemsFromMinSize(minSize int64) int64 { diff --git a/vendor/k8s.io/apiserver/pkg/cel/common/values.go b/vendor/k8s.io/apiserver/pkg/cel/common/values.go index c8279f01371..d9034a80fb2 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/common/values.go +++ b/vendor/k8s.io/apiserver/pkg/cel/common/values.go @@ -84,22 +84,18 @@ func UnstructuredToVal(unstructured interface{}, schema Schema) ref.Val { }, } } - - // properties and additionalProperties are mutual exclusive, but nothing prevents the situation - // where both are missing. - // An object that (1) has no properties (2) has no additionalProperties or additionalProperties == false - // is treated as an empty object. - // An object that has additionalProperties == true is treated as an unstructured map. - // An object that has x-kubernetes-preserve-unknown-field extension set is treated as an unstructured map. - // Empty object vs unstructured map is differentiated by unstructuredMap implementation with the set schema. - // The resulting result remains the same. 
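// The schemas.go hunk above removes estimateMaxStringEnumLength, so enum-constrained
// strings fall back to the request-size based estimate. The removed bound was simply the
// length of the longest enum value; a sketch with invented enum values:
package main

import "fmt"

func longestEnumString(enum []any) int64 {
	var maxLen int64
	for _, v := range enum {
		if s, ok := v.(string); ok && int64(len(s)) > maxLen {
			maxLen = int64(len(s))
		}
	}
	return maxLen
}

func main() {
	fmt.Println(longestEnumString([]any{"Always", "IfNotPresent", "Never"})) // 12
}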
- return &unstructuredMap{ - value: m, - schema: schema, - propSchema: func(key string) (Schema, bool) { - return nil, false - }, + // A object with x-kubernetes-preserve-unknown-fields but no properties or additionalProperties is treated + // as an empty object. + if schema.IsXPreserveUnknownFields() { + return &unstructuredMap{ + value: m, + schema: schema, + propSchema: func(key string) (Schema, bool) { + return nil, false + }, + } } + return types.NewErr("invalid object type, expected either Properties or AdditionalProperties with Allows=true and non-empty Schema") } if schema.Type() == "array" { diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go index 0c1dee82dc5..ed0d3404116 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -22,9 +22,7 @@ import ( "sync" "github.com/google/cel-go/cel" - "github.com/google/cel-go/checker" "github.com/google/cel-go/ext" - "github.com/google/cel-go/interpreter" "golang.org/x/sync/singleflight" "k8s.io/apimachinery/pkg/util/version" @@ -43,7 +41,7 @@ import ( // desirable because it means that CEL expressions are portable across a wider range // of Kubernetes versions. func DefaultCompatibilityVersion() *version.Version { - return version.MajorMinor(1, 28) + return version.MajorMinor(1, 27) } var baseOpts = []VersionedOptions{ @@ -59,21 +57,14 @@ var baseOpts = []VersionedOptions{ cel.EagerlyValidateDeclarations(true), cel.DefaultUTCTimeZone(true), + ext.Strings(ext.StringsVersion(0)), library.URLs(), library.Regex(), library.Lists(), - - // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. - // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. - cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)), }, ProgramOptions: []cel.ProgramOption{ cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost), cel.CostLimit(celconfig.PerCallLimit), - - // cel-go v0.17.7 change the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7. - // Since it is a regression fix, we apply it uniformly to all code use v0.17.7. - cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)), }, }, { @@ -90,39 +81,7 @@ var baseOpts = []VersionedOptions{ library.Quantity(), }, }, - // add the new validator in 1.29 - { - IntroducedVersion: version.MajorMinor(1, 29), - EnvOptions: []cel.EnvOption{ - cel.ASTValidators( - cel.ValidateDurationLiterals(), - cel.ValidateTimestampLiterals(), - cel.ValidateRegexLiterals(), - cel.ValidateHomogeneousAggregateLiterals(), - ), - }, - }, - // String library - { - IntroducedVersion: version.MajorMinor(1, 0), - RemovedVersion: version.MajorMinor(1, 29), - EnvOptions: []cel.EnvOption{ - ext.Strings(ext.StringsVersion(0)), - }, - }, - { - IntroducedVersion: version.MajorMinor(1, 29), - EnvOptions: []cel.EnvOption{ - ext.Strings(ext.StringsVersion(2)), - }, - }, - // Set library - { - IntroducedVersion: version.MajorMinor(1, 29), - EnvOptions: []cel.EnvOption{ - ext.Sets(), - }, - }, + // TODO: switch to ext.Strings version 2 once format() is fixed to work with HomogeneousAggregateLiterals. 
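// base.go above lowers DefaultCompatibilityVersion to 1.27 and folds the strings
// extension back into the base option block. Each VersionedOptions entry is gated by a
// simple introduced/removed comparison against the compatibility version; a sketch of
// that check using k8s.io/apimachinery's version package:
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// enabledAt reports whether an option introduced (and optionally removed) at a given
// Kubernetes minor version is active for the chosen compatibility version.
func enabledAt(compat, introduced, removed *version.Version) bool {
	if compat.LessThan(introduced) {
		return false
	}
	return removed == nil || compat.LessThan(removed)
}

func main() {
	compat := version.MajorMinor(1, 27) // the default after this diff
	fmt.Println(enabledAt(compat, version.MajorMinor(1, 23), nil))                      // true
	fmt.Println(enabledAt(compat, version.MajorMinor(1, 29), nil))                      // false: 1.29-only options stay off
	fmt.Println(enabledAt(compat, version.MajorMinor(1, 0), version.MajorMinor(1, 29))) // true until compat reaches 1.29
}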
} // MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics diff --git a/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go index 16183050d9b..1742deb0a2f 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go +++ b/vendor/k8s.io/apiserver/pkg/cel/lazy/lazy.go @@ -35,7 +35,7 @@ var _ traits.Mapper = (*MapValue)(nil) // MapValue is a map that lazily evaluate its value when a field is first accessed. // The map value is not designed to be thread-safe. type MapValue struct { - typeValue *types.Type + typeValue *types.TypeValue // values are previously evaluated values obtained from callbacks values map[string]ref.Val diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go index df4bf080714..00f0200e865 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go @@ -202,10 +202,6 @@ var authzLib = &authz{} type authz struct{} -func (*authz) LibraryName() string { - return "k8s.authz" -} - var authzLibraryDecls = map[string][]cel.FunctionOpt{ "path": { cel.MemberOverload("authorizer_path", []*cel.Type{AuthorizerType, cel.StringType}, PathCheckType, @@ -582,7 +578,7 @@ type decisionVal struct { // any object type that has receiver functions but does not expose any fields to // CEL. type receiverOnlyObjectVal struct { - typeValue *types.Type + typeValue *types.TypeValue } // receiverOnlyVal returns a receiverOnlyObjectVal for the given type. diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go index d18c138ec8f..3d1b3fbb2bc 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go @@ -101,8 +101,8 @@ func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *ch // If the list contains strings or bytes, add the cost of traversing all the strings/bytes as a way // of estimating the additional comparison cost. if elNode := l.listElementNode(*target); elNode != nil { - k := elNode.Type().Kind() - if k == types.StringKind || k == types.BytesKind { + t := elNode.Type().GetPrimitive() + if t == exprpb.Type_STRING || t == exprpb.Type_BYTES { sz := l.sizeEstimate(elNode) elCost = elCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor)) } @@ -247,8 +247,7 @@ func (l *CostEstimator) sizeEstimate(t checker.AstNode) checker.SizeEstimate { } func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode { - if params := list.Type().Parameters(); len(params) > 0 { - lt := params[0] + if lt := list.Type().GetListType(); lt != nil { nodePath := list.Path() if nodePath != nil { // Provide path if we have it so that a OpenAPIv3 maxLength validation can be looked up, if it exists @@ -256,10 +255,10 @@ func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode { path := make([]string, len(nodePath)+1) copy(path, nodePath) path[len(nodePath)] = "@items" - return &itemsNode{path: path, t: lt, expr: nil} + return &itemsNode{path: path, t: lt.GetElemType(), expr: nil} } else { // Provide just the type if no path is available so that worst case size can be looked up based on type. 
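// The library hunks above drop the LibraryName() methods; that method belongs to cel-go's
// SingletonLibrary interface in newer releases (an assumption worth checking against the
// pinned cel-go version), while the plain cel.Library interface needs only the two option
// methods. A made-up no-op library as a sketch:
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

type noOpLib struct{}

func (noOpLib) CompileOptions() []cel.EnvOption {
	return []cel.EnvOption{cel.Variable("request", cel.DynType)}
}

func (noOpLib) ProgramOptions() []cel.ProgramOption {
	return nil
}

func main() {
	env, err := cel.NewEnv(cel.Lib(noOpLib{}))
	fmt.Println(env != nil, err) // true <nil>
}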
- return &itemsNode{t: lt, expr: nil} + return &itemsNode{t: lt.GetElemType(), expr: nil} } } return nil @@ -274,7 +273,7 @@ func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstim type itemsNode struct { path []string - t *types.Type + t *exprpb.Type expr *exprpb.Expr } @@ -282,7 +281,7 @@ func (i *itemsNode) Path() []string { return i.path } -func (i *itemsNode) Type() *types.Type { +func (i *itemsNode) Type() *exprpb.Type { return i.t } diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go index 327ec93d6e2..fe51dc87fdb 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go @@ -95,10 +95,6 @@ var listsLib = &lists{} type lists struct{} -func (*lists) LibraryName() string { - return "k8s.lists" -} - var paramA = cel.TypeParamType("A") // CEL typeParams can be used to constraint to a specific trait (e.g. traits.ComparableType) if the 1st operand is the type to constrain. diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go index b4ac91c8a72..49e3dae7cdb 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go @@ -22,7 +22,6 @@ import ( "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" - "k8s.io/apimachinery/pkg/api/resource" apiservercel "k8s.io/apiserver/pkg/cel" ) @@ -142,10 +141,6 @@ var quantityLib = &quantity{} type quantity struct{} -func (*quantity) LibraryName() string { - return "k8s.quantity" -} - var quantityLibraryDecls = map[string][]cel.FunctionOpt{ "quantity": { cel.Overload("string_to_quantity", []*cel.Type{cel.StringType}, apiservercel.QuantityType, cel.UnaryBinding((stringToQuantity))), diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go index 147a40f9bd2..17fb3d44c97 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go @@ -51,10 +51,6 @@ var regexLib = ®ex{} type regex struct{} -func (*regex) LibraryName() string { - return "k8s.regex" -} - var regexLibraryDecls = map[string][]cel.FunctionOpt{ "find": { cel.MemberOverload("string_find_string", []*cel.Type{cel.StringType, cel.StringType}, cel.StringType, diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/vendor/k8s.io/apiserver/pkg/cel/library/test.go index dcbc058a110..95446f63c6b 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/test.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/test.go @@ -37,10 +37,6 @@ type testLib struct { version uint32 } -func (*testLib) LibraryName() string { - return "k8s.test" -} - type TestOption func(*testLib) *testLib func TestVersion(version uint32) func(lib *testLib) *testLib { diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go index 8f4ba85af7c..7be054ece37 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go +++ b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go @@ -112,10 +112,6 @@ var urlsLib = &urls{} type urls struct{} -func (*urls) LibraryName() string { - return "k8s.urls" -} - var urlLibraryDecls = map[string][]cel.FunctionOpt{ "url": { cel.Overload("string_to_url", []*cel.Type{cel.StringType}, apiservercel.URLType, diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go index 
bc7b0d8c959..0e2cc6e2b2e 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go @@ -54,10 +54,6 @@ func (s *Schema) Format() string { return s.Schema.Format } -func (s *Schema) Pattern() string { - return s.Schema.Pattern -} - func (s *Schema) Items() common.Schema { if s.Schema.Items == nil || s.Schema.Items.Schema == nil { return nil @@ -90,50 +86,14 @@ func (s *Schema) Default() any { return s.Schema.Default } -func (s *Schema) Minimum() *float64 { - return s.Schema.Minimum -} - -func (s *Schema) IsExclusiveMinimum() bool { - return s.Schema.ExclusiveMinimum -} - -func (s *Schema) Maximum() *float64 { - return s.Schema.Maximum -} - -func (s *Schema) IsExclusiveMaximum() bool { - return s.Schema.ExclusiveMaximum -} - -func (s *Schema) MultipleOf() *float64 { - return s.Schema.MultipleOf -} - -func (s *Schema) UniqueItems() bool { - return s.Schema.UniqueItems -} - -func (s *Schema) MinItems() *int64 { - return s.Schema.MinItems -} - func (s *Schema) MaxItems() *int64 { return s.Schema.MaxItems } -func (s *Schema) MinLength() *int64 { - return s.Schema.MinLength -} - func (s *Schema) MaxLength() *int64 { return s.Schema.MaxLength } -func (s *Schema) MinProperties() *int64 { - return s.Schema.MinProperties -} - func (s *Schema) MaxProperties() *int64 { return s.Schema.MaxProperties } @@ -150,40 +110,6 @@ func (s *Schema) Nullable() bool { return s.Schema.Nullable } -func (s *Schema) AllOf() []common.Schema { - var res []common.Schema - for _, nestedSchema := range s.Schema.AllOf { - nestedSchema := nestedSchema - res = append(res, &Schema{&nestedSchema}) - } - return res -} - -func (s *Schema) AnyOf() []common.Schema { - var res []common.Schema - for _, nestedSchema := range s.Schema.AnyOf { - nestedSchema := nestedSchema - res = append(res, &Schema{&nestedSchema}) - } - return res -} - -func (s *Schema) OneOf() []common.Schema { - var res []common.Schema - for _, nestedSchema := range s.Schema.OneOf { - nestedSchema := nestedSchema - res = append(res, &Schema{&nestedSchema}) - } - return res -} - -func (s *Schema) Not() common.Schema { - if s.Schema.Not == nil { - return nil - } - return &Schema{s.Schema.Not} -} - func (s *Schema) IsXIntOrString() bool { return isXIntOrString(s.Schema) } @@ -200,18 +126,10 @@ func (s *Schema) XListType() string { return getXListType(s.Schema) } -func (s *Schema) XMapType() string { - return getXMapType(s.Schema) -} - func (s *Schema) XListMapKeys() []string { return getXListMapKeys(s.Schema) } -func (s *Schema) XValidations() []common.ValidationRule { - return getXValidations(s.Schema) -} - func (s *Schema) WithTypeAndObjectMeta() common.Schema { return &Schema{common.WithTypeAndObjectMeta(s.Schema)} } diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go index 3bb3bccf057..6a2f830320b 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go @@ -18,7 +18,6 @@ package openapi import ( "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apiserver/pkg/cel/common" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -48,11 +47,6 @@ func getXListType(schema *spec.Schema) string { return s } -func getXMapType(schema *spec.Schema) string { - s, _ := schema.Extensions.GetString(extMapType) - return s -} - func getXListMapKeys(schema *spec.Schema) []string { mapKeys, ok := schema.Extensions.GetStringSlice(extListMapKeys) if !ok { @@ -61,47 +55,8 @@ func 
getXListMapKeys(schema *spec.Schema) []string { return mapKeys } -type ValidationRule struct { - RuleField string `json:"rule"` - MessageField string `json:"message"` - MessageExpressionField string `json:"messageExpression"` - PathField string `json:"fieldPath"` -} - -func (v ValidationRule) Rule() string { - return v.RuleField -} - -func (v ValidationRule) Message() string { - return v.MessageField -} - -func (v ValidationRule) FieldPath() string { - return v.PathField -} - -func (v ValidationRule) MessageExpression() string { - return v.MessageExpressionField -} - -// TODO: simplify -func getXValidations(schema *spec.Schema) []common.ValidationRule { - var rules []ValidationRule - err := schema.Extensions.GetObject(extValidations, &rules) - if err != nil { - return nil - } - results := make([]common.ValidationRule, len(rules)) - for i, rule := range rules { - results[i] = rule - } - return results -} - const extIntOrString = "x-kubernetes-int-or-string" const extEmbeddedResource = "x-kubernetes-embedded-resource" const extPreserveUnknownFields = "x-kubernetes-preserve-unknown-fields" const extListType = "x-kubernetes-list-type" -const extMapType = "x-kubernetes-map-type" const extListMapKeys = "x-kubernetes-list-map-keys" -const extValidations = "x-kubernetes-validations" diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go deleted file mode 100644 index eb3c3763556..00000000000 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/combined.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resolver - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Combine combines the DefinitionsSchemaResolver with a secondary schema resolver. -// The resulting schema resolver uses the DefinitionsSchemaResolver for a GVK that DefinitionsSchemaResolver knows, -// and the secondary otherwise. -func (d *DefinitionsSchemaResolver) Combine(secondary SchemaResolver) SchemaResolver { - return &combinedSchemaResolver{definitions: d, secondary: secondary} -} - -type combinedSchemaResolver struct { - definitions *DefinitionsSchemaResolver - secondary SchemaResolver -} - -// ResolveSchema takes a GroupVersionKind (GVK) and returns the OpenAPI schema -// identified by the GVK. -// If the DefinitionsSchemaResolver knows the gvk, the DefinitionsSchemaResolver handles the resolution, -// otherwise, the secondary does. 
-func (r *combinedSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - if _, ok := r.definitions.gvkToRef[gvk]; ok { - return r.definitions.ResolveSchema(gvk) - } - return r.secondary.ResolveSchema(gvk) -} diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go index 12b353b0bcc..df7357f7785 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/definitions.go @@ -29,39 +29,40 @@ import ( // DefinitionsSchemaResolver resolves the schema of a built-in type // by looking up the OpenAPI definitions. type DefinitionsSchemaResolver struct { - defs map[string]common.OpenAPIDefinition - gvkToRef map[schema.GroupVersionKind]string + defs map[string]common.OpenAPIDefinition + gvkToSchema map[schema.GroupVersionKind]*spec.Schema } // NewDefinitionsSchemaResolver creates a new DefinitionsSchemaResolver. // An example working setup: -// getDefinitions = "k8s.io/kubernetes/pkg/generated/openapi".GetOpenAPIDefinitions // scheme = "k8s.io/client-go/kubernetes/scheme".Scheme -func NewDefinitionsSchemaResolver(getDefinitions common.GetOpenAPIDefinitions, schemes ...*runtime.Scheme) *DefinitionsSchemaResolver { - gvkToRef := make(map[schema.GroupVersionKind]string) - namer := openapi.NewDefinitionNamer(schemes...) +// getDefinitions = "k8s.io/kubernetes/pkg/generated/openapi".GetOpenAPIDefinitions +func NewDefinitionsSchemaResolver(scheme *runtime.Scheme, getDefinitions common.GetOpenAPIDefinitions) *DefinitionsSchemaResolver { + gvkToSchema := make(map[schema.GroupVersionKind]*spec.Schema) + namer := openapi.NewDefinitionNamer(scheme) defs := getDefinitions(func(path string) spec.Ref { return spec.MustCreateRef(path) }) - for name := range defs { + for name, def := range defs { _, e := namer.GetDefinitionName(name) gvks := extensionsToGVKs(e) + s := def.Schema // map value not addressable, make copy for _, gvk := range gvks { - gvkToRef[gvk] = name + gvkToSchema[gvk] = &s } } return &DefinitionsSchemaResolver{ - gvkToRef: gvkToRef, - defs: defs, + gvkToSchema: gvkToSchema, + defs: defs, } } func (d *DefinitionsSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) (*spec.Schema, error) { - ref, ok := d.gvkToRef[gvk] + s, ok := d.gvkToSchema[gvk] if !ok { return nil, fmt.Errorf("cannot resolve %v: %w", gvk, ErrSchemaNotFound) } - s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { + s, err := populateRefs(func(ref string) (*spec.Schema, bool) { // find the schema by the ref string, and return a deep copy def, ok := d.defs[ref] if !ok { @@ -69,7 +70,7 @@ func (d *DefinitionsSchemaResolver) ResolveSchema(gvk schema.GroupVersionKind) ( } s := def.Schema return &s, true - }, ref) + }, s) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go index 9c6cefce8de..53cbc7054b3 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/discovery.go @@ -53,34 +53,34 @@ func (r *ClientDiscoveryResolver) ResolveSchema(gvk schema.GroupVersionKind) (*s if err != nil { return nil, err } - ref, err := resolveRef(resp, gvk) + s, err := resolveType(resp, gvk) if err != nil { return nil, err } - s, err := PopulateRefs(func(ref string) (*spec.Schema, bool) { + s, err = populateRefs(func(ref string) (*spec.Schema, bool) { s, ok := 
resp.Components.Schemas[strings.TrimPrefix(ref, refPrefix)] return s, ok - }, ref) + }, s) if err != nil { return nil, err } return s, nil } -func resolveRef(resp *schemaResponse, gvk schema.GroupVersionKind) (string, error) { - for ref, s := range resp.Components.Schemas { +func resolveType(resp *schemaResponse, gvk schema.GroupVersionKind) (*spec.Schema, error) { + for _, s := range resp.Components.Schemas { var gvks []schema.GroupVersionKind err := s.Extensions.GetObject(extGVK, &gvks) if err != nil { - return "", err + return nil, err } for _, g := range gvks { if g == gvk { - return ref, nil + return s, nil } } } - return "", fmt.Errorf("cannot resolve group version kind %q: %w", gvk, ErrSchemaNotFound) + return nil, fmt.Errorf("cannot resolve group version kind %q: %w", gvk, ErrSchemaNotFound) } func resourcePathFromGV(gv schema.GroupVersion) string { diff --git a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go index 56e2a4bbd3a..49321bab47d 100644 --- a/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go +++ b/vendor/k8s.io/apiserver/pkg/cel/openapi/resolver/refs.go @@ -19,41 +19,19 @@ package resolver import ( "fmt" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kube-openapi/pkg/validation/spec" ) -// PopulateRefs recursively replaces Refs in the schema with the referred one. +// populateRefs recursively replaces Refs in the schema with the referred one. // schemaOf is the callback to find the corresponding schema by the ref. // This function will not mutate the original schema. If the schema needs to be // mutated, a copy will be returned, otherwise it returns the original schema. -func PopulateRefs(schemaOf func(ref string) (*spec.Schema, bool), rootRef string) (*spec.Schema, error) { - visitedRefs := sets.New[string]() - rootSchema, ok := schemaOf(rootRef) - visitedRefs.Insert(rootRef) - if !ok { - return nil, fmt.Errorf("internal error: cannot resolve Ref for root schema %q: %w", rootRef, ErrSchemaNotFound) - } - return populateRefs(schemaOf, visitedRefs, rootSchema) -} - -func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.Set[string], schema *spec.Schema) (*spec.Schema, error) { +func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), schema *spec.Schema) (*spec.Schema, error) { result := *schema changed := false ref, isRef := refOf(schema) if isRef { - if visited.Has(ref) { - return &spec.Schema{ - // for circular ref, return an empty object as placeholder - SchemaProps: spec.SchemaProps{Type: []string{"object"}}, - }, nil - } - visited.Insert(ref) - // restore visited state at the end of the recursion. - defer func() { - visited.Delete(ref) - }() // replace the whole schema with the referred one. 
resolved, ok := schemaOf(ref) if !ok { @@ -66,7 +44,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.S props := make(map[string]spec.Schema, len(schema.Properties)) propsChanged := false for name, prop := range result.Properties { - populated, err := populateRefs(schemaOf, visited, &prop) + populated, err := populateRefs(schemaOf, &prop) if err != nil { return nil, err } @@ -80,7 +58,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.S result.Properties = props } if result.AdditionalProperties != nil && result.AdditionalProperties.Schema != nil { - populated, err := populateRefs(schemaOf, visited, result.AdditionalProperties.Schema) + populated, err := populateRefs(schemaOf, result.AdditionalProperties.Schema) if err != nil { return nil, err } @@ -91,7 +69,7 @@ func populateRefs(schemaOf func(ref string) (*spec.Schema, bool), visited sets.S } // schema is a list, populate its items if result.Items != nil && result.Items.Schema != nil { - populated, err := populateRefs(schemaOf, visited, result.Items.Schema) + populated, err := populateRefs(schemaOf, result.Items.Schema) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index a6d293a1590..4803975a735 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -164,7 +164,7 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. req = req.WithContext(request.WithUser(ctx, newUser)) oldUser, _ := request.UserFrom(ctx) - httplog.LogOf(req, w).Addf("%v is impersonating %v", userString(oldUser), userString(newUser)) + httplog.LogOf(req, w).Addf("%v is acting as %v", oldUser, newUser) ae := audit.AuditEventFrom(ctx) audit.LogImpersonatedUser(ae, newUser) @@ -183,24 +183,6 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. }) } -func userString(u user.Info) string { - if u == nil { - return "" - } - b := strings.Builder{} - if name := u.GetName(); name == "" { - b.WriteString("") - } else { - b.WriteString(name) - } - if groups := u.GetGroups(); len(groups) > 0 { - b.WriteString("[") - b.WriteString(strings.Join(groups, ",")) - b.WriteString("]") - } - return b.String() -} - func unescapeExtraKey(encodedKey string) string { key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. 
if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go index 1ecf59d4543..67a1790c56a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/traces.go @@ -20,7 +20,6 @@ import ( "net/http" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" tracing "k8s.io/component-base/tracing" @@ -33,15 +32,7 @@ func WithTracing(handler http.Handler, tp trace.TracerProvider) http.Handler { otelhttp.WithPublicEndpoint(), otelhttp.WithTracerProvider(tp), } - wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Add the http.target attribute to the otelhttp span - // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 - if r.URL != nil { - trace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) - } - handler.ServeHTTP(w, r) - }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(wrappedHandler, "KubernetesAPI", opts...) + return otelhttp.NewHandler(handler, "KubernetesAPI", opts...) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go index d3b501cf52a..c110964fc42 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/get.go @@ -267,7 +267,7 @@ func ListResource(r rest.Lister, rw rest.Watcher, scope *RequestScope, forceWatc } requestInfo, _ := request.RequestInfoFrom(ctx) metrics.RecordLongRunning(req, requestInfo, metrics.APIServerComponent, func() { - serveWatch(watcher, scope, outputMediaType, req, w, timeout, metrics.CleanListScope(ctx, &opts)) + serveWatch(watcher, scope, outputMediaType, req, w, timeout) }) return } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go index 2c2d3e4824b..7f85563699d 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/helpers.go @@ -77,96 +77,6 @@ func (lazy *lazyAccept) String() string { return "unknown" } -// lazyAPIGroup implements String() string and it will -// lazily get Group from request info. -type lazyAPIGroup struct { - req *http.Request -} - -func (lazy *lazyAPIGroup) String() string { - if lazy.req != nil { - ctx := lazy.req.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if ok { - return requestInfo.APIGroup - } - } - - return "unknown" -} - -// lazyAPIVersion implements String() string and it will -// lazily get Group from request info. -type lazyAPIVersion struct { - req *http.Request -} - -func (lazy *lazyAPIVersion) String() string { - if lazy.req != nil { - ctx := lazy.req.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if ok { - return requestInfo.APIVersion - } - } - - return "unknown" -} - -// lazyName implements String() string and it will -// lazily get Group from request info. 
-type lazyName struct { - req *http.Request -} - -func (lazy *lazyName) String() string { - if lazy.req != nil { - ctx := lazy.req.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if ok { - return requestInfo.Name - } - } - - return "unknown" -} - -// lazySubresource implements String() string and it will -// lazily get Group from request info. -type lazySubresource struct { - req *http.Request -} - -func (lazy *lazySubresource) String() string { - if lazy.req != nil { - ctx := lazy.req.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if ok { - return requestInfo.Subresource - } - } - - return "unknown" -} - -// lazyNamespace implements String() string and it will -// lazily get Group from request info. -type lazyNamespace struct { - req *http.Request -} - -func (lazy *lazyNamespace) String() string { - if lazy.req != nil { - ctx := lazy.req.Context() - requestInfo, ok := apirequest.RequestInfoFrom(ctx) - if ok { - return requestInfo.Namespace - } - } - - return "unknown" -} - // lazyAuditID implements Stringer interface to lazily retrieve // the audit ID associated with the request. type lazyAuditID struct { diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go index 57766924c19..cf3205a9a93 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/metrics/metrics.go @@ -18,10 +18,7 @@ package metrics import ( "context" - "sync" - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" ) type RequestBodyVerb string @@ -38,8 +35,8 @@ var ( RequestBodySizes = metrics.NewHistogramVec( &metrics.HistogramOpts{ Subsystem: "apiserver", - Name: "request_body_size_bytes", - Help: "Apiserver request body size in bytes broken out by resource and verb.", + Name: "request_body_sizes", + Help: "Apiserver request body sizes broken out by size.", // we use 0.05 KB as the smallest bucket with 0.1 KB increments up to the // apiserver limit. Buckets: metrics.LinearBuckets(50000, 100000, 31), @@ -49,15 +46,6 @@ var ( ) ) -var registerMetrics sync.Once - -// Register all metrics. 
-func Register() { - registerMetrics.Do(func() { - legacyregistry.MustRegister(RequestBodySizes) - }) -} - func RecordRequestBodySize(ctx context.Context, resource string, verb RequestBodyVerb, size int) { RequestBodySizes.WithContext(ctx).WithLabelValues(resource, string(verb)).Observe(float64(size)) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go index 348b1092d7d..4780c59fd42 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/response.go @@ -18,11 +18,8 @@ package handlers import ( "context" - "encoding/json" "fmt" - "io" "net/http" - "reflect" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -32,228 +29,48 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" - "k8s.io/apiserver/pkg/endpoints/metrics" endpointsrequest "k8s.io/apiserver/pkg/endpoints/request" - - klog "k8s.io/klog/v2" ) -// watchEmbeddedEncoder performs encoding of the embedded object. -// -// NOTE: watchEmbeddedEncoder is NOT thread-safe. -type watchEmbeddedEncoder struct { - encoder runtime.Encoder - - ctx context.Context - - // target, if non-nil, configures transformation type. - // The other options are ignored if target is nil. - target *schema.GroupVersionKind - tableOptions *metav1.TableOptions - scope *RequestScope - - // identifier of the encoder, computed lazily - identifier runtime.Identifier -} - -func newWatchEmbeddedEncoder(ctx context.Context, encoder runtime.Encoder, target *schema.GroupVersionKind, tableOptions *metav1.TableOptions, scope *RequestScope) *watchEmbeddedEncoder { - return &watchEmbeddedEncoder{ - encoder: encoder, - ctx: ctx, - target: target, - tableOptions: tableOptions, - scope: scope, - } -} - -// Encode implements runtime.Encoder interface. -func (e *watchEmbeddedEncoder) Encode(obj runtime.Object, w io.Writer) error { +// transformObject takes the object as returned by storage and ensures it is in +// the client's desired form, as well as ensuring any API level fields like self-link +// are properly set. +func transformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(e.Identifier(), e.doEncode, w) - } - return e.doEncode(obj, w) -} - -func (e *watchEmbeddedEncoder) doEncode(obj runtime.Object, w io.Writer) error { - result, err := doTransformObject(e.ctx, obj, e.tableOptions, e.target, e.scope) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) - result = obj - } - - // When we are tranforming to a table, use the original table options when - // we should print headers only on the first object - headers should be - // omitted on subsequent events. - if e.tableOptions != nil && !e.tableOptions.NoHeaders { - e.tableOptions.NoHeaders = true - // With options change, we should recompute the identifier. - // Clearing this will trigger lazy recompute when needed. 
- e.identifier = "" - } - - return e.encoder.Encode(result, w) -} - -// Identifier implements runtime.Encoder interface. -func (e *watchEmbeddedEncoder) Identifier() runtime.Identifier { - if e.identifier == "" { - e.identifier = e.embeddedIdentifier() - } - return e.identifier -} - -type watchEmbeddedEncoderIdentifier struct { - Name string `json:"name,omitempty"` - Encoder string `json:"encoder,omitempty"` - Target string `json:"target,omitempty"` - Options metav1.TableOptions `json:"options,omitempty"` - NoHeaders bool `json:"noHeaders,omitempty"` -} - -func (e *watchEmbeddedEncoder) embeddedIdentifier() runtime.Identifier { - if e.target == nil { - // If no conversion is performed, we effective only use - // the embedded identifier. - return e.encoder.Identifier() - } - identifier := watchEmbeddedEncoderIdentifier{ - Name: "watch-embedded", - Encoder: string(e.encoder.Identifier()), - Target: e.target.String(), - } - if e.target.Kind == "Table" && e.tableOptions != nil { - identifier.Options = *e.tableOptions - identifier.NoHeaders = e.tableOptions.NoHeaders - } - - result, err := json.Marshal(identifier) - if err != nil { - klog.Fatalf("Failed marshaling identifier for watchEmbeddedEncoder: %v", err) - } - return runtime.Identifier(result) -} - -// watchEncoder performs encoding of the watch events. -// -// NOTE: watchEncoder is NOT thread-safe. -type watchEncoder struct { - ctx context.Context - kind schema.GroupVersionKind - embeddedEncoder runtime.Encoder - encoder runtime.Encoder - framer io.Writer - - buffer runtime.Splice - eventBuffer runtime.Splice - - currentEmbeddedIdentifier runtime.Identifier - identifiers map[watch.EventType]runtime.Identifier -} - -func newWatchEncoder(ctx context.Context, kind schema.GroupVersionKind, embeddedEncoder runtime.Encoder, encoder runtime.Encoder, framer io.Writer) *watchEncoder { - return &watchEncoder{ - ctx: ctx, - kind: kind, - embeddedEncoder: embeddedEncoder, - encoder: encoder, - framer: framer, - buffer: runtime.NewSpliceBuffer(), - eventBuffer: runtime.NewSpliceBuffer(), - } -} - -// Encode encodes a given watch event. -// NOTE: if events object is implementing the CacheableObject interface, -// -// the serialized version is cached in that object [not the event itself]. -func (e *watchEncoder) Encode(event watch.Event) error { - encodeFunc := func(obj runtime.Object, w io.Writer) error { - return e.doEncode(obj, event, w) - } - if co, ok := event.Object.(runtime.CacheableObject); ok { - return co.CacheEncode(e.identifier(event.Type), encodeFunc, e.framer) - } - return encodeFunc(event.Object, e.framer) -} - -func (e *watchEncoder) doEncode(obj runtime.Object, event watch.Event, w io.Writer) error { - defer e.buffer.Reset() - - if err := e.embeddedEncoder.Encode(obj, e.buffer); err != nil { - return fmt.Errorf("unable to encode watch object %T: %v", obj, err) - } - - // ContentType is not required here because we are defaulting to the serializer type. 
- outEvent := &metav1.WatchEvent{ - Type: string(event.Type), - Object: runtime.RawExtension{Raw: e.buffer.Bytes()}, - } - metrics.WatchEventsSizes.WithContext(e.ctx).WithLabelValues(e.kind.Group, e.kind.Version, e.kind.Kind).Observe(float64(len(outEvent.Object.Raw))) - - defer e.eventBuffer.Reset() - if err := e.encoder.Encode(outEvent, e.eventBuffer); err != nil { - return fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e) - } - - _, err := w.Write(e.eventBuffer.Bytes()) - return err -} - -type watchEncoderIdentifier struct { - Name string `json:"name,omitempty"` - EmbeddedEncoder string `json:"embeddedEncoder,omitempty"` - Encoder string `json:"encoder,omitempty"` - EventType string `json:"eventType,omitempty"` -} - -func (e *watchEncoder) identifier(eventType watch.EventType) runtime.Identifier { - // We need to take into account that in embeddedEncoder includes table - // transformer, then its identifier is dynamic. As a result, whenever - // the identifier of embeddedEncoder changes, we need to invalidate the - // whole identifiers cache. - // TODO(wojtek-t): Can we optimize it somehow? - if e.currentEmbeddedIdentifier != e.embeddedEncoder.Identifier() { - e.currentEmbeddedIdentifier = e.embeddedEncoder.Identifier() - e.identifiers = map[watch.EventType]runtime.Identifier{} - } - if _, ok := e.identifiers[eventType]; !ok { - e.identifiers[eventType] = e.typeIdentifier(eventType) - } - return e.identifiers[eventType] -} - -func (e *watchEncoder) typeIdentifier(eventType watch.EventType) runtime.Identifier { - // The eventType is a non-standard pattern. This is coming from the fact - // that we're effectively serializing the whole watch event, but storing - // it in serializations of the Object within the watch event. - identifier := watchEncoderIdentifier{ - Name: "watch", - EmbeddedEncoder: string(e.embeddedEncoder.Identifier()), - Encoder: string(e.encoder.Identifier()), - EventType: string(eventType), - } - - result, err := json.Marshal(identifier) - if err != nil { - klog.Fatalf("Failed marshaling identifier for watchEncoder: %v", err) + if mediaType.Convert != nil { + // Non-nil mediaType.Convert means that some conversion of the object + // has to happen. Currently conversion may potentially modify the + // object or assume something about it (e.g. asTable operates on + // reflection, which won't work for any wrapper). + // To ensure it will work correctly, let's operate on base objects + // and not cache it for now. + // + // TODO: Long-term, transformObject should be changed so that it + // implements runtime.Encoder interface. + return doTransformObject(ctx, co.GetObject(), opts, mediaType, scope, req) + } } - return runtime.Identifier(result) + return doTransformObject(ctx, obj, opts, mediaType, scope, req) } -// doTransformResponseObject is used for handling all requests, including watch. -func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, target *schema.GroupVersionKind, scope *RequestScope) (runtime.Object, error) { +func doTransformObject(ctx context.Context, obj runtime.Object, opts interface{}, mediaType negotiation.MediaTypeOptions, scope *RequestScope, req *http.Request) (runtime.Object, error) { if _, ok := obj.(*metav1.Status); ok { return obj, nil } - switch { + // ensure that for empty lists we don't return items. + // This is safe to modify without deep-copying the object, as + // List objects themselves are never cached. 
+ if meta.IsListType(obj) && meta.LenList(obj) == 0 { + if err := meta.SetList(obj, []runtime.Object{}); err != nil { + return nil, err + } + } + + switch target := mediaType.Convert; { case target == nil: - // If we ever change that from a no-op, the identifier of - // the watchEmbeddedEncoder has to be adjusted accordingly. return obj, nil case target.Kind == "PartialObjectMetadata": @@ -311,7 +128,6 @@ func targetEncodingForTransform(scope *RequestScope, mediaType negotiation.Media // transformResponseObject takes an object loaded from storage and performs any necessary transformations. // Will write the complete response object. -// transformResponseObject is used only for handling non-streaming requests. func transformResponseObject(ctx context.Context, scope *RequestScope, req *http.Request, w http.ResponseWriter, statusCode int, mediaType negotiation.MediaTypeOptions, result runtime.Object) { options, err := optionsForTransform(mediaType, req) if err != nil { @@ -319,19 +135,9 @@ func transformResponseObject(ctx context.Context, scope *RequestScope, req *http return } - // ensure that for empty lists we don't return items. - // This is safe to modify without deep-copying the object, as - // List objects themselves are never cached. - if meta.IsListType(result) && meta.LenList(result) == 0 { - if err := meta.SetList(result, []runtime.Object{}); err != nil { - scope.err(err, w, req) - return - } - } - var obj runtime.Object do := func() { - obj, err = doTransformObject(ctx, result, options, mediaType.Convert, scope) + obj, err = transformObject(ctx, result, options, mediaType, scope, req) } endpointsrequest.TrackTransformResponseObjectLatency(ctx, do) diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go index 760c9bf40b8..7d273d62248 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/trace_util.go @@ -27,11 +27,6 @@ func traceFields(req *http.Request) []attribute.KeyValue { attribute.Stringer("accept", &lazyAccept{req: req}), attribute.Stringer("audit-id", &lazyAuditID{req: req}), attribute.Stringer("client", &lazyClientIP{req: req}), - attribute.Stringer("api-group", &lazyAPIGroup{req: req}), - attribute.Stringer("api-version", &lazyAPIVersion{req: req}), - attribute.Stringer("name", &lazyName{req: req}), - attribute.Stringer("subresource", &lazySubresource{req: req}), - attribute.Stringer("namespace", &lazyNamespace{req: req}), attribute.String("protocol", req.Proto), attribute.Stringer("resource", &lazyResource{req: req}), attribute.Stringer("scope", &lazyScope{req: req}), diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go index 6e86b79be55..79cb11ca600 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/handlers/watch.go @@ -19,7 +19,9 @@ package handlers import ( "bytes" "fmt" + "io" "net/http" + "reflect" "time" "golang.org/x/net/websocket" @@ -27,15 +29,13 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/util/httpstream/wsstream" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/handlers/negotiation" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest 
"k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/features" - "k8s.io/apiserver/pkg/storage" - utilfeature "k8s.io/apiserver/pkg/util/feature" ) // nothing will ever be sent down this channel @@ -63,7 +63,7 @@ func (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) { // serveWatch will serve a watch response. // TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled. -func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration, metricsScope string) { +func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) { defer watcher.Stop() options, err := optionsForTransform(mediaTypeOptions, req) @@ -92,8 +92,6 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n mediaType += ";stream=watch" } - ctx := req.Context() - // locate the appropriate embedded encoder based on the transform var embeddedEncoder runtime.Encoder contentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req) @@ -108,41 +106,13 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n embeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion()) } - var memoryAllocator runtime.MemoryAllocator - - if encoderWithAllocator, supportsAllocator := embeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { - // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. - // instead, we allocate the buffer for the entire watch session and release it when we close the connection. - memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) - defer runtime.AllocatorPool.Put(memoryAllocator) - embeddedEncoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) - } - var tableOptions *metav1.TableOptions - if options != nil { - if passedOptions, ok := options.(*metav1.TableOptions); ok { - tableOptions = passedOptions - } else { - scope.err(fmt.Errorf("unexpected options type: %T", options), w, req) - return - } - } - embeddedEncoder = newWatchEmbeddedEncoder(ctx, embeddedEncoder, mediaTypeOptions.Convert, tableOptions, scope) - - if encoderWithAllocator, supportsAllocator := encoder.(runtime.EncoderWithAllocator); supportsAllocator { - if memoryAllocator == nil { - // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. - // instead, we allocate the buffer for the entire watch session and release it when we close the connection. 
- memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) - defer runtime.AllocatorPool.Put(memoryAllocator) - } - encoder = runtime.NewEncoderWithAllocator(encoderWithAllocator, memoryAllocator) - } - var serverShuttingDownCh <-chan struct{} if signals := apirequest.ServerShutdownSignalFrom(req.Context()); signals != nil { serverShuttingDownCh = signals.ShuttingDown() } + ctx := req.Context() + server := &WatchServer{ Watching: watcher, Scope: scope, @@ -153,10 +123,23 @@ func serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions n Encoder: encoder, EmbeddedEncoder: embeddedEncoder, + Fixup: func(obj runtime.Object) runtime.Object { + result, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to transform object %v: %v", reflect.TypeOf(obj), err)) + return obj + } + // When we are transformed to a table, use the table options as the state for whether we + // should print headers - on watch, we only want to print table headers on the first object + // and omit them on subsequent events. + if tableOptions, ok := options.(*metav1.TableOptions); ok { + tableOptions.NoHeaders = true + } + return result + }, + TimeoutFactory: &realTimeoutFactory{timeout}, ServerShuttingDownCh: serverShuttingDownCh, - - metricsScope: metricsScope, } server.ServeHTTP(w, req) @@ -177,11 +160,11 @@ type WatchServer struct { Encoder runtime.Encoder // used to encode the nested object in the watch stream EmbeddedEncoder runtime.Encoder + // used to correct the object before we send it to the serializer + Fixup func(runtime.Object) runtime.Object TimeoutFactory TimeoutFactory ServerShuttingDownCh <-chan struct{} - - metricsScope string } // ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked @@ -212,6 +195,17 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } + var e streaming.Encoder + var memoryAllocator runtime.MemoryAllocator + + if encoder, supportsAllocator := s.Encoder.(runtime.EncoderWithAllocator); supportsAllocator { + memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) + defer runtime.AllocatorPool.Put(memoryAllocator) + e = streaming.NewEncoderWithAllocator(framer, encoder, memoryAllocator) + } else { + e = streaming.NewEncoder(framer, s.Encoder) + } + // ensure the connection times out timeoutCh, cleanup := s.TimeoutFactory.TimeoutCh() defer cleanup() @@ -222,10 +216,26 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusOK) flusher.Flush() - watchEncoder := newWatchEncoder(req.Context(), kind, s.EmbeddedEncoder, s.Encoder, framer) + var unknown runtime.Unknown + internalEvent := &metav1.InternalEvent{} + outEvent := &metav1.WatchEvent{} + buf := runtime.NewSpliceBuffer() ch := s.Watching.ResultChan() done := req.Context().Done() + embeddedEncodeFn := s.EmbeddedEncoder.Encode + if encoder, supportsAllocator := s.EmbeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator { + if memoryAllocator == nil { + // don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call. + // instead, we allocate the buffer for the entire watch session and release it when we close the connection. 
+ memoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator) + defer runtime.AllocatorPool.Put(memoryAllocator) + } + embeddedEncodeFn = func(obj runtime.Object, w io.Writer) error { + return encoder.EncodeWithAllocator(obj, w, memoryAllocator) + } + } + for { select { case <-s.ServerShuttingDownCh: @@ -247,20 +257,42 @@ func (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { return } metrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc() - isWatchListLatencyRecordingRequired := shouldRecordWatchListLatency(event) - if err := watchEncoder.Encode(event); err != nil { - utilruntime.HandleError(err) - // client disconnect. + obj := s.Fixup(event.Object) + if err := embeddedEncodeFn(obj, buf); err != nil { + // unexpected error + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) return } + // ContentType is not required here because we are defaulting to the serializer + // type + unknown.Raw = buf.Bytes() + event.Object = &unknown + metrics.WatchEventsSizes.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw))) + + *outEvent = metav1.WatchEvent{} + + // create the external type directly and encode it. Clients will only recognize the serialization we provide. + // The internal event is being reused, not reallocated so its just a few extra assignments to do it this way + // and we get the benefit of using conversion functions which already have to stay in sync + *internalEvent = metav1.InternalEvent(event) + err := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err)) + // client disconnect. + return + } + if err := e.Encode(outEvent); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v (%#v)", outEvent, err, e)) + // client disconnect. + return + } if len(ch) == 0 { flusher.Flush() } - if isWatchListLatencyRecordingRequired { - metrics.RecordWatchListLatency(req.Context(), s.Scope.Resource, s.metricsScope) - } + + buf.Reset() } } } @@ -294,10 +326,10 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { // End of results. 
return } - - if err := s.EmbeddedEncoder.Encode(event.Object, buf); err != nil { + obj := s.Fixup(event.Object) + if err := s.EmbeddedEncoder.Encode(obj, buf); err != nil { // unexpected error - utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", event.Object, err)) + utilruntime.HandleError(fmt.Errorf("unable to encode watch object %T: %v", obj, err)) return } @@ -339,19 +371,3 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) { } } } - -func shouldRecordWatchListLatency(event watch.Event) bool { - if event.Type != watch.Bookmark || !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { - return false - } - // as of today the initial-events-end annotation is added only to a single event - // by the watch cache and only when certain conditions are met - // - // for more please read https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list - hasAnnotation, err := storage.HasInitialEventsEndBookmarkAnnotation(event.Object) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to determine if the obj has the required annotation for measuring watchlist latency, obj %T: %v", event.Object, err)) - return false - } - return hasAnnotation -} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go index ffd4a7dcbfb..042bd802f1a 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/installer.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/installer.go @@ -796,7 +796,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -817,7 +817,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...). Returns(http.StatusOK, "OK", versionedList). @@ -850,7 +850,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PUT(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). 
@@ -879,7 +879,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.PATCH(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Consumes(supportedTypes...). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). @@ -909,7 +909,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } route := ws.POST(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). @@ -938,7 +938,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(deleteReturnType). @@ -962,7 +962,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.DELETE(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(versionedStatus). @@ -990,7 +990,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). @@ -1011,7 +1011,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag handler = utilwarning.AddWarningsHandler(handler, warnings) route := ws.GET(action.Path).To(handler). 
Doc(doc). - Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).")). + Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go index 48fc951adee..ba2aed69d44 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go @@ -18,7 +18,6 @@ package metrics import ( "context" - "fmt" "net/http" "net/url" "strconv" @@ -27,12 +26,8 @@ import ( "time" restful "github.com/emicklei/go-restful/v3" - - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/audit" "k8s.io/apiserver/pkg/authentication/user" @@ -285,17 +280,6 @@ var ( []string{"code_path"}, ) - watchListLatencies = compbasemetrics.NewHistogramVec( - &compbasemetrics.HistogramOpts{ - Subsystem: APIServerComponent, - Name: "watch_list_duration_seconds", - Help: "Response latency distribution in seconds for watch list requests broken by group, version, resource and scope.", - Buckets: []float64{0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1.0, 2, 4, 6, 8, 10, 15, 20, 30, 45, 60}, - StabilityLevel: compbasemetrics.ALPHA, - }, - []string{"group", "version", "resource", "scope"}, - ) - metrics = []resettableCollector{ deprecatedRequestGauge, requestCounter, @@ -316,7 +300,6 @@ var ( requestAbortsTotal, requestPostTimeoutTotal, requestTimestampComparisonDuration, - watchListLatencies, } // these are the valid request methods which we report in our metrics. Any other request methods @@ -528,18 +511,6 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp fn() } -// RecordWatchListLatency simply records response latency for watch list requests. -func RecordWatchListLatency(ctx context.Context, gvr schema.GroupVersionResource, metricsScope string) { - requestReceivedTimestamp, ok := request.ReceivedTimestampFrom(ctx) - if !ok { - utilruntime.HandleError(fmt.Errorf("unable to measure watchlist latency because no received ts found in the ctx, gvr: %s", gvr)) - return - } - elapsedSeconds := time.Since(requestReceivedTimestamp).Seconds() - - watchListLatencies.WithContext(ctx).WithLabelValues(gvr.Group, gvr.Version, gvr.Resource, metricsScope).Observe(elapsedSeconds) -} - // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component string, deprecated bool, removedRelease string, httpCode, respSize int, elapsed time.Duration) { @@ -650,26 +621,6 @@ func CleanScope(requestInfo *request.RequestInfo) string { return "" } -// CleanListScope computes the request scope for metrics. -// -// Note that normally we would use CleanScope for computation. 
-// But due to the same reasons mentioned in determineRequestNamespaceAndName we cannot. -func CleanListScope(ctx context.Context, opts *metainternalversion.ListOptions) string { - namespace, name := determineRequestNamespaceAndName(ctx, opts) - if len(name) > 0 { - return "resource" - } - if len(namespace) > 0 { - return "namespace" - } - if requestInfo, ok := request.RequestInfoFrom(ctx); ok { - if requestInfo.IsResourceRequest { - return "cluster" - } - } - return "" -} - // CanonicalVerb distinguishes LISTs from GETs (and HEADs). It assumes verb is // UPPERCASE. func CanonicalVerb(verb string, scope string) string { @@ -704,30 +655,6 @@ func CleanVerb(verb string, request *http.Request, requestInfo *request.RequestI return reportedVerb } -// determineRequestNamespaceAndName computes name and namespace for the given requests -// -// note that the logic of this function was copy&pasted from cacher.go -// after an unsuccessful attempt of moving it to RequestInfo -// -// see: https://github.com/kubernetes/kubernetes/pull/120520 -func determineRequestNamespaceAndName(ctx context.Context, opts *metainternalversion.ListOptions) (namespace, name string) { - if requestNamespace, ok := request.NamespaceFrom(ctx); ok && len(requestNamespace) > 0 { - namespace = requestNamespace - } else if opts != nil && opts.FieldSelector != nil { - if selectorNamespace, ok := opts.FieldSelector.RequiresExactMatch("metadata.namespace"); ok { - namespace = selectorNamespace - } - } - if requestInfo, ok := request.RequestInfoFrom(ctx); ok && requestInfo != nil && len(requestInfo.Name) > 0 { - name = requestInfo.Name - } else if opts != nil && opts.FieldSelector != nil { - if selectorName, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok { - name = selectorName - } - } - return -} - // cleanVerb additionally ensures that unknown verbs don't clog up the metrics. func cleanVerb(verb, suggestedVerb string, request *http.Request, requestInfo *request.RequestInfo) string { // CanonicalVerb (being an input for this function) doesn't handle correctly the diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index e524e0c6474..d06447a7e50 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -54,7 +54,6 @@ const ( // owner: @smarterclayton // alpha: v1.8 // beta: v1.9 - // stable: 1.29 // // Allow API clients to retrieve resource lists in chunks rather than // all at once. @@ -63,7 +62,6 @@ const ( // owner: @MikeSpreitzer @yue9944882 // alpha: v1.18 // beta: v1.20 - // stable: 1.29 // // Enables managing request concurrency with prioritization and fairness at each server. // The FeatureGate was introduced in release 1.15 but the feature @@ -101,7 +99,6 @@ const ( // kep: https://kep.k8s.io/2876 // alpha: v1.23 // beta: v1.25 - // stable: v1.29 // // Enables expression validation for Custom Resource CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" @@ -124,7 +121,6 @@ const ( // kep: https://kep.k8s.io/3299 // alpha: v1.25 // beta: v1.27 - // stable: v1.29 // // Enables KMS v2 API for encryption at rest. KMSv2 featuregate.Feature = "KMSv2" @@ -132,7 +128,6 @@ const ( // owner: @enj // kep: https://kep.k8s.io/3299 // beta: v1.28 - // stable: v1.29 // // Enables the use of derived encryption keys with KMS v2. 
KMSv2KDF featuregate.Feature = "KMSv2KDF" @@ -146,10 +141,18 @@ const ( // in the spec returned from kube-apiserver. OpenAPIEnums featuregate.Feature = "OpenAPIEnums" + // owner: @jefftree + // kep: https://kep.k8s.io/2896 + // alpha: v1.23 + // beta: v1.24 + // stable: v1.27 + // + // Enables kubernetes to publish OpenAPI v3 + OpenAPIV3 featuregate.Feature = "OpenAPIV3" + // owner: @caesarxuchao // alpha: v1.15 // beta: v1.16 - // stable: 1.29 // // Allow apiservers to show a count of remaining items in the response // to a chunking list request. @@ -218,20 +221,6 @@ const ( // document. StorageVersionHash featuregate.Feature = "StorageVersionHash" - // owner: @aramase, @enj, @nabokihms - // kep: https://kep.k8s.io/3331 - // alpha: v1.29 - // - // Enables Structured Authentication Configuration - StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration" - - // owner: @palnabarun - // kep: https://kep.k8s.io/3221 - // alpha: v1.29 - // - // Enables Structured Authorization Configuration - StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration" - // owner: @wojtek-t // alpha: v1.15 // beta: v1.16 @@ -265,14 +254,6 @@ const ( // // Allow the API server to serve consistent lists from cache ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache" - - // owner: @tkashem - // beta: v1.29 - // - // Allow Priority & Fairness in the API server to use a zero value for - // the 'nominalConcurrencyShares' field of the 'limited' section of a - // priority level. - ZeroLimitedNominalConcurrencyShares featuregate.Feature = "ZeroLimitedNominalConcurrencyShares" ) func init() { @@ -288,9 +269,9 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.Beta}, - APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 + APIListChunking: {Default: true, PreRelease: featuregate.Beta}, - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, @@ -300,19 +281,21 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Beta}, - CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, + KMSv1: {Default: true, PreRelease: featuregate.Deprecated}, - KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + KMSv2: {Default: true, PreRelease: featuregate.Beta}, - KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + KMSv2KDF: {Default: false, PreRelease: featuregate.Beta}, // default and lock to true in 1.29, remove in 1.31 OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 + OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + + RemainingItemCount: {Default: 
true, PreRelease: featuregate.Beta}, RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ -326,11 +309,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - StructuredAuthenticationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, - - StructuredAuthorizationConfiguration: {Default: false, PreRelease: featuregate.Alpha}, - - UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, + UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta}, WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, @@ -341,6 +320,4 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchList: {Default: false, PreRelease: featuregate.Alpha}, ConsistentListFromCache: {Default: false, PreRelease: featuregate.Alpha}, - - ZeroLimitedNominalConcurrencyShares: {Default: false, PreRelease: featuregate.Beta}, } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go index 3c974f3981e..3983c92d013 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/registry/storage_factory.go @@ -44,7 +44,7 @@ func StorageWithCacher() generic.StorageDecorator { triggerFuncs storage.IndexerFuncs, indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { - s, d, err := generic.NewRawStorage(storageConfig, newFunc, newListFunc, resourcePrefix) + s, d, err := generic.NewRawStorage(storageConfig, newFunc) if err != nil { return s, d, err } diff --git a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go index 4c2b2fc0ed5..715aa104773 100644 --- a/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go +++ b/vendor/k8s.io/apiserver/pkg/registry/generic/storage_decorator.go @@ -47,12 +47,12 @@ func UndecoratedStorage( getAttrsFunc storage.AttrFunc, trigger storage.IndexerFuncs, indexers *cache.Indexers) (storage.Interface, factory.DestroyFunc, error) { - return NewRawStorage(config, newFunc, newListFunc, resourcePrefix) + return NewRawStorage(config, newFunc) } // NewRawStorage creates the low level kv storage. This is a work-around for current // two layer of same storage interface. // TODO: Once cacher is enabled on all registries (event registry is special), we will remove this method. 
-func NewRawStorage(config *storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, factory.DestroyFunc, error) { - return factory.Create(*config, newFunc, newListFunc, resourcePrefix) +func NewRawStorage(config *storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, factory.DestroyFunc, error) { + return factory.Create(*config, newFunc) } diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index beff08f14d0..047736e57d4 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -78,7 +78,6 @@ import ( "k8s.io/component-base/tracing" "k8s.io/klog/v2" openapicommon "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/utils/clock" utilsnet "k8s.io/utils/net" @@ -195,7 +194,7 @@ type Config struct { // OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. Use DefaultOpenAPIConfig for "working" defaults. OpenAPIConfig *openapicommon.Config // OpenAPIV3Config will be used in generating OpenAPI V3 spec. This is nil by default. Use DefaultOpenAPIV3Config for "working" defaults. - OpenAPIV3Config *openapicommon.OpenAPIV3Config + OpenAPIV3Config *openapicommon.Config // SkipOpenAPIInstallation avoids installing the OpenAPI handler if set to true. SkipOpenAPIInstallation bool @@ -483,23 +482,8 @@ func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, de } // DefaultOpenAPIV3Config provides the default OpenAPIV3Config used to build the OpenAPI V3 spec -func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.OpenAPIV3Config { - defaultConfig := &openapicommon.OpenAPIV3Config{ - IgnorePrefixes: []string{}, - Info: &spec.Info{ - InfoProps: spec.InfoProps{ - Title: "Generic API Server", - }, - }, - DefaultResponse: &spec3.Response{ - ResponseProps: spec3.ResponseProps{ - Description: "Default Response.", - }, - }, - GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags, - GetDefinitionName: defNamer.GetDefinitionName, - GetDefinitions: getDefinitions, - } +func DefaultOpenAPIV3Config(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config { + defaultConfig := DefaultOpenAPIConfig(getDefinitions, defNamer) defaultConfig.Definitions = getDefinitions(func(name string) spec.Ref { defName, _ := defaultConfig.GetDefinitionName(name) return spec.MustCreateRef("#/components/schemas/" + openapicommon.EscapeJsonPointer(defName)) @@ -624,45 +608,6 @@ func completeOpenAPI(config *openapicommon.Config, version *version.Info) { } } -func completeOpenAPIV3(config *openapicommon.OpenAPIV3Config, version *version.Info) { - if config == nil { - return - } - if config.SecuritySchemes != nil { - // Setup OpenAPI security: all APIs will have the same authentication for now. 
- config.DefaultSecurity = []map[string][]string{} - keys := []string{} - for k := range config.SecuritySchemes { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - config.DefaultSecurity = append(config.DefaultSecurity, map[string][]string{k: {}}) - } - if config.CommonResponses == nil { - config.CommonResponses = map[int]*spec3.Response{} - } - if _, exists := config.CommonResponses[http.StatusUnauthorized]; !exists { - config.CommonResponses[http.StatusUnauthorized] = &spec3.Response{ - ResponseProps: spec3.ResponseProps{ - Description: "Unauthorized", - }, - } - } - } - // make sure we populate info, and info.version, if not manually set - if config.Info == nil { - config.Info = &spec.Info{} - } - if config.Info.Version == "" { - if version != nil { - config.Info.Version = strings.Split(version.String(), "-")[0] - } else { - config.Info.Version = "unversioned" - } - } -} - // DrainedNotify returns a lifecycle signal of genericapiserver already drained while shutting down. func (c *Config) DrainedNotify() <-chan struct{} { return c.lifecycleSignals.InFlightRequestsDrained.Signaled() @@ -688,7 +633,7 @@ func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedCo } completeOpenAPI(c.OpenAPIConfig, c.Version) - completeOpenAPIV3(c.OpenAPIV3Config, c.Version) + completeOpenAPI(c.OpenAPIV3Config, c.Version) if c.DiscoveryAddresses == nil { c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress} @@ -724,12 +669,6 @@ func (c *RecommendedConfig) Complete() CompletedConfig { return c.Config.Complete(c.SharedInformerFactory) } -var allowedMediaTypes = []string{ - runtime.ContentTypeJSON, - runtime.ContentTypeYAML, - runtime.ContentTypeProtobuf, -} - // New creates a new server which logically combines the handling chain with the passed server. // name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delegating. // delegationTarget may not be nil. 
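The config.go hunks above fold the OpenAPI v3 configuration back into the shared *openapicommon.Config type, so DefaultOpenAPIV3Config once again builds on DefaultOpenAPIConfig and both specs are completed by the same completeOpenAPI helper. The following is a minimal, hedged sketch of how a caller could construct both configs under that assumption; the bare runtime scheme and the empty definitions callback are placeholders for illustration only, not part of this change.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
	"k8s.io/apiserver/pkg/server"
	openapicommon "k8s.io/kube-openapi/pkg/common"
)

func main() {
	// A bare scheme and an empty definitions callback stand in for real generated definitions.
	namer := apiopenapi.NewDefinitionNamer(runtime.NewScheme())
	getDefs := func(openapicommon.ReferenceCallback) map[string]openapicommon.OpenAPIDefinition {
		return map[string]openapicommon.OpenAPIDefinition{}
	}

	v2 := server.DefaultOpenAPIConfig(getDefs, namer)
	v3 := server.DefaultOpenAPIV3Config(getDefs, namer)

	// After the revert both values share the same *common.Config type.
	fmt.Printf("v2: %T, v3: %T\n", v2, v3)
}
```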
@@ -737,18 +676,6 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G if c.Serializer == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil") } - for _, info := range c.Serializer.SupportedMediaTypes() { - var ok bool - for _, mt := range allowedMediaTypes { - if info.MediaType == mt { - ok = true - break - } - } - if !ok { - return nil, fmt.Errorf("refusing to create new apiserver %q with support for media type %q (allowed media types are: %s)", name, info.MediaType, strings.Join(allowedMediaTypes, ", ")) - } - } if c.LoopbackClientConfig == nil { return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil") } @@ -1067,10 +994,14 @@ func installAPI(s *GenericAPIServer, c *Config) { if c.EnableMetrics { if c.EnableProfiling { routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) - slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) + if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { + slis.SLIMetricsWithReset{}.Install(s.Handler.NonGoRestfulMux) + } } else { routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux) - slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) + if utilfeature.DefaultFeatureGate.Enabled(features.ComponentSLIs) { + slis.SLIMetrics{}.Install(s.Handler.NonGoRestfulMux) + } } } @@ -1084,7 +1015,7 @@ func installAPI(s *GenericAPIServer, c *Config) { s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService()) } } - if c.FlowControl != nil { + if c.FlowControl != nil && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.APIPriorityAndFairness) { c.FlowControl.Install(s.Handler.NonGoRestfulMux) } } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go index 75bc49e9931..6dbed6a650d 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go @@ -21,7 +21,7 @@ import ( "context" "crypto/x509" "fmt" - "os" + "io/ioutil" "sync/atomic" "time" @@ -98,7 +98,7 @@ func (c *DynamicFileCAContent) AddListener(listener Listener) { // loadCABundle determines the next set of content for the file. func (c *DynamicFileCAContent) loadCABundle() error { - caBundle, err := os.ReadFile(c.filename) + caBundle, err := ioutil.ReadFile(c.filename) if err != nil { return err } diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go index 62aef4992c5..36c4d45868f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go @@ -20,7 +20,7 @@ import ( "context" "crypto/tls" "fmt" - "os" + "io/ioutil" "sync/atomic" "time" @@ -80,11 +80,11 @@ func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { // loadCertKeyPair determines the next set of content for the file. 
func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { - cert, err := os.ReadFile(c.certFile) + cert, err := ioutil.ReadFile(c.certFile) if err != nil { return err } - key, err := os.ReadFile(c.keyFile) + key, err := ioutil.ReadFile(c.keyFile) if err != nil { return err } diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go index ce9a3691a96..2df786e13f0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -18,7 +18,7 @@ package egressselector import ( "fmt" - "os" + "io/ioutil" "strings" "k8s.io/apimachinery/pkg/runtime" @@ -51,7 +51,7 @@ func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSe return nil, nil } // a file was provided, so we just read it. - data, err := os.ReadFile(configFilePath) + data, err := ioutil.ReadFile(configFilePath) if err != nil { return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go index a38ef646498..0936d6ef4e2 100644 --- a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -22,10 +22,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "io/ioutil" "net" "net/http" "net/url" - "os" "strings" "time" @@ -277,7 +277,7 @@ func getTLSConfig(t *apiserver.TLSConfig) (*tls.Config, error) { } certPool := x509.NewCertPool() if caCert != "" { - certBytes, err := os.ReadFile(caCert) + certBytes, err := ioutil.ReadFile(caCert) if err != nil { return nil, fmt.Errorf("failed to read cert file %s, got %v", caCert, err) } diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 1e91b9a3115..05cc44263fb 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -26,7 +26,7 @@ import ( "sync/atomic" "time" - flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" apitypes "k8s.io/apimachinery/pkg/types" epmetrics "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" diff --git a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go index 450c7d4f64b..665f20bebdb 100644 --- a/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go +++ b/vendor/k8s.io/apiserver/pkg/server/genericapiserver.go @@ -158,7 +158,7 @@ type GenericAPIServer struct { openAPIConfig *openapicommon.Config // Enable swagger and/or OpenAPI V3 if these configs are non-nil. - openAPIV3Config *openapicommon.OpenAPIV3Config + openAPIV3Config *openapicommon.Config // SkipOpenAPIInstallation indicates not to install the OpenAPI handler // during PrepareRun. 
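The dynamiccertificates hunks above swap os.ReadFile back to ioutil.ReadFile in the CA-bundle and serving cert/key loaders without changing their exported API. The sketch below shows one way the CA loader could be used, assuming the vendored API in this tree; the file path is a placeholder.

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/server/dynamiccertificates"
)

func main() {
	// NewDynamicCAContentFromFile performs the initial loadCABundle shown above
	// (now via ioutil.ReadFile) before returning.
	ca, err := dynamiccertificates.NewDynamicCAContentFromFile("client-ca", "/etc/kubernetes/pki/ca.crt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded CA bundle %q: %d bytes\n", ca.Name(), len(ca.CurrentCABundleContent()))
}
```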
@@ -430,9 +430,11 @@ func (s *GenericAPIServer) PrepareRun() preparedGenericAPIServer { } if s.openAPIV3Config != nil && !s.skipOpenAPIInstallation { - s.OpenAPIV3VersionedService = routes.OpenAPI{ - V3Config: s.openAPIV3Config, - }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + if utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { + s.OpenAPIV3VersionedService = routes.OpenAPI{ + Config: s.openAPIV3Config, + }.InstallV3(s.Handler.GoRestfulContainer, s.Handler.NonGoRestfulMux) + } } s.installHealthz() diff --git a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go index a4b4c5899fd..4d6248f834f 100644 --- a/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go +++ b/vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go @@ -205,6 +205,7 @@ func StatusIsNot(statuses ...int) StacktracePred { func (rl *respLogger) Addf(format string, data ...interface{}) { rl.mutex.Lock() defer rl.mutex.Unlock() + rl.addedInfo.WriteString("\n") rl.addedInfo.WriteString(fmt.Sprintf(format, data...)) } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go index 6ab58bab249..13968b4e7d9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/api_enablement.go @@ -42,9 +42,6 @@ func NewAPIEnablementOptions() *APIEnablementOptions { // AddFlags adds flags for a specific APIServer to the specified FlagSet func (s *APIEnablementOptions) AddFlags(fs *pflag.FlagSet) { - if s == nil { - return - } fs.Var(&s.RuntimeConfig, "runtime-config", ""+ "A set of key=value pairs that enable or disable built-in APIs. Supported options are:\n"+ "v1=true|false for the core API group\n"+ @@ -90,6 +87,7 @@ func (s *APIEnablementOptions) Validate(registries ...GroupRegistry) []error { // ApplyTo override MergedResourceConfig with defaults and registry func (s *APIEnablementOptions) ApplyTo(c *server.Config, defaultResourceConfig *serverstore.ResourceConfig, registry resourceconfig.GroupVersionRegistry) error { + if s == nil { return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 5b3da51faf8..fcb41942ff2 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -105,36 +105,10 @@ const ( kmsReloadHealthCheckName = "kms-providers" ) -var codecs serializer.CodecFactory - -// this atomic bool allows us to swap enablement of the KMSv2KDF feature in tests -// as the feature gate is now locked to true starting with v1.29 -// Note: it cannot be set by an end user -var kdfDisabled atomic.Bool - -// this function should only be called in tests to swap enablement of the KMSv2KDF feature -func SetKDFForTests(b bool) func() { - kdfDisabled.Store(!b) - return func() { - kdfDisabled.Store(false) - } -} - -// this function should be used to determine enablement of the KMSv2KDF feature -// instead of getting it from DefaultFeatureGate as the feature gate is now locked -// to true starting with v1.29 -func GetKDF() bool { - return !kdfDisabled.Load() -} - func init() { - configScheme := runtime.NewScheme() - utilruntime.Must(apiserverconfig.AddToScheme(configScheme)) - utilruntime.Must(apiserverconfigv1.AddToScheme(configScheme)) - codecs = serializer.NewCodecFactory(configScheme) - 
envelopemetrics.RegisterMetrics() - storagevalue.RegisterMetrics() metrics.RegisterMetrics() + storagevalue.RegisterMetrics() + envelopemetrics.RegisterMetrics() } type kmsPluginHealthzResponse struct { @@ -157,8 +131,6 @@ type kmsv2PluginProbe struct { service kmsservice.Service lastResponse *kmsPluginHealthzResponse l *sync.Mutex - apiServerID string - version string } type kmsHealthChecker []healthz.HealthChecker @@ -212,13 +184,13 @@ type EncryptionConfiguration struct { // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. // If reload is true, or KMS v2 plugins are used with no KMS v1 plugins, the returned slice of health checkers will always be of length 1. -func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool, apiServerID string) (*EncryptionConfiguration, error) { +func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool) (*EncryptionConfiguration, error) { config, contentHash, err := loadConfig(filepath, reload) if err != nil { return nil, fmt.Errorf("error while parsing file: %w", err) } - transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(ctx, config, apiServerID) + transformers, kmsHealthChecks, kmsUsed, err := getTransformerOverridesAndKMSPluginHealthzCheckers(ctx, config) if err != nil { return nil, fmt.Errorf("error while building transformers: %w", err) } @@ -243,9 +215,9 @@ func LoadEncryptionConfig(ctx context.Context, filepath string, reload bool, api // getTransformerOverridesAndKMSPluginHealthzCheckers creates the set of transformers and KMS healthz checks based on the given config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration, apiServerID string) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) { +func getTransformerOverridesAndKMSPluginHealthzCheckers(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthz.HealthChecker, *kmsState, error) { var kmsHealthChecks []healthz.HealthChecker - transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config, apiServerID) + transformers, probes, kmsUsed, err := getTransformerOverridesAndKMSPluginProbes(ctx, config) if err != nil { return nil, nil, nil, err } @@ -264,7 +236,7 @@ type healthChecker interface { // getTransformerOverridesAndKMSPluginProbes creates the set of transformers and KMS probes based on the given config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
-func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration, apiServerID string) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) { +func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apiserverconfig.EncryptionConfiguration) (map[schema.GroupResource]storagevalue.Transformer, []healthChecker, *kmsState, error) { resourceToPrefixTransformer := map[schema.GroupResource][]storagevalue.PrefixTransformer{} var probes []healthChecker var kmsUsed kmsState @@ -273,7 +245,7 @@ func getTransformerOverridesAndKMSPluginProbes(ctx context.Context, config *apis for _, resourceConfig := range config.Resources { resourceConfig := resourceConfig - transformers, p, used, err := prefixTransformersAndProbes(ctx, resourceConfig, apiServerID) + transformers, p, used, err := prefixTransformersAndProbes(ctx, resourceConfig) if err != nil { return nil, nil, nil, err } @@ -390,7 +362,7 @@ func (h *kmsv2PluginProbe) rotateDEKOnKeyIDChange(ctx context.Context, statusKey // this gate can only change during tests, but the check is cheap enough to always make // this allows us to easily exercise both modes without restarting the API server // TODO integration test that this dynamically takes effect - useSeed := GetKDF() + useSeed := utilfeature.DefaultFeatureGate.Enabled(features.KMSv2KDF) stateUseSeed := state.EncryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED // state is valid and status keyID is unchanged from when we generated this DEK/seed so there is no need to rotate it @@ -475,23 +447,15 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C if response.Healthz != "ok" { errs = append(errs, fmt.Errorf("got unexpected healthz status: %s", response.Healthz)) } - if response.Version != envelopekmsv2.KMSAPIVersionv2 && response.Version != envelopekmsv2.KMSAPIVersionv2beta1 { - errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersionv2, response.Version)) - } else { - // set version for the first status response - if len(h.version) == 0 { - h.version = response.Version - } - if h.version != response.Version { - errs = append(errs, fmt.Errorf("KMSv2 API version should not change after the initial status response version %s, got %s", h.version, response.Version)) - } + if response.Version != envelopekmsv2.KMSAPIVersion { + errs = append(errs, fmt.Errorf("expected KMSv2 API version %s, got %s", envelopekmsv2.KMSAPIVersion, response.Version)) } if errCode, err := envelopekmsv2.ValidateKeyID(response.KeyID); err != nil { envelopemetrics.RecordInvalidKeyIDFromStatus(h.name, string(errCode)) errs = append(errs, fmt.Errorf("got invalid KMSv2 KeyID hash %q: %w", envelopekmsv2.GetHashIfNotEmpty(response.KeyID), err)) } else { - envelopemetrics.RecordKeyIDFromStatus(h.name, response.KeyID, h.apiServerID) + envelopemetrics.RecordKeyIDFromStatus(h.name, response.KeyID) // unconditionally append as we filter out nil errors below errs = append(errs, h.rotateDEKOnKeyIDChange(ctx, response.KeyID, string(uuid.NewUUID()))) } @@ -504,24 +468,6 @@ func (h *kmsv2PluginProbe) isKMSv2ProviderHealthyAndMaybeRotateDEK(ctx context.C // loadConfig parses the encryption configuration file at filepath and returns the parsed config and hash of the file. 
func loadConfig(filepath string, reload bool) (*apiserverconfig.EncryptionConfiguration, string, error) { - data, contentHash, err := loadDataAndHash(filepath) - if err != nil { - return nil, "", fmt.Errorf("error while loading file: %w", err) - } - - configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) - if err != nil { - return nil, "", fmt.Errorf("error decoding encryption provider configuration file %q: %w", filepath, err) - } - config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) - if !ok { - return nil, "", fmt.Errorf("got unexpected config type: %v", gvk) - } - - return config, contentHash, validation.ValidateEncryptionConfiguration(config, reload).ToAggregate() -} - -func loadDataAndHash(filepath string) ([]byte, string, error) { f, err := os.Open(filepath) if err != nil { return nil, "", fmt.Errorf("error opening encryption provider configuration file %q: %w", filepath, err) @@ -536,20 +482,27 @@ func loadDataAndHash(filepath string) ([]byte, string, error) { return nil, "", fmt.Errorf("encryption provider configuration file %q is empty", filepath) } - return data, computeEncryptionConfigHash(data), nil -} + scheme := runtime.NewScheme() + codecs := serializer.NewCodecFactory(scheme) + utilruntime.Must(apiserverconfig.AddToScheme(scheme)) + utilruntime.Must(apiserverconfigv1.AddToScheme(scheme)) + + configObj, gvk, err := codecs.UniversalDecoder().Decode(data, nil, nil) + if err != nil { + return nil, "", fmt.Errorf("error decoding encryption provider configuration file %q: %w", filepath, err) + } + config, ok := configObj.(*apiserverconfig.EncryptionConfiguration) + if !ok { + return nil, "", fmt.Errorf("got unexpected config type: %v", gvk) + } -// GetEncryptionConfigHash reads the encryption configuration file at filepath and returns the hash of the file. -// It does not attempt to decode or load the config, and serves as a cheap check to determine if the file has changed. -func GetEncryptionConfigHash(filepath string) (string, error) { - _, contentHash, err := loadDataAndHash(filepath) - return contentHash, err + return config, computeEncryptionConfigHash(data), validation.ValidateEncryptionConfiguration(config, reload).ToAggregate() } // prefixTransformersAndProbes creates the set of transformers and KMS probes based on the given resource config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. 
-func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration, apiServerID string) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) { +func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.ResourceConfiguration) ([]storagevalue.PrefixTransformer, []healthChecker, *kmsState, error) { var transformers []storagevalue.PrefixTransformer var probes []healthChecker var kmsUsed kmsState @@ -577,7 +530,7 @@ func prefixTransformersAndProbes(ctx context.Context, config apiserverconfig.Res transformer, transformerErr = secretboxPrefixTransformer(provider.Secretbox) case provider.KMS != nil: - transformer, probe, used, transformerErr = kmsPrefixTransformer(ctx, provider.KMS, apiServerID) + transformer, probe, used, transformerErr = kmsPrefixTransformer(ctx, provider.KMS) if transformerErr == nil { probes = append(probes, probe) kmsUsed.accumulate(used) @@ -736,7 +689,7 @@ func (s *kmsState) accumulate(other *kmsState) { // kmsPrefixTransformer creates a KMS transformer and probe based on the given KMS config. // It may launch multiple go routines whose lifecycle is controlled by ctx. // In case of an error, the caller is responsible for canceling ctx to clean up any go routines that may have been launched. -func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration, apiServerID string) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) { +func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfiguration) (storagevalue.PrefixTransformer, healthChecker, *kmsState, error) { kmsName := config.Name switch config.APIVersion { case kmsAPIVersionV1: @@ -782,14 +735,14 @@ func kmsPrefixTransformer(ctx context.Context, config *apiserverconfig.KMSConfig service: envelopeService, l: &sync.Mutex{}, lastResponse: &kmsPluginHealthzResponse{}, - apiServerID: apiServerID, } // initialize state so that Load always works probe.state.Store(&envelopekmsv2.State{}) primeAndProbeKMSv2(ctx, probe, kmsName) + transformer := storagevalue.PrefixTransformer{ - Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState, apiServerID), + Transformer: envelopekmsv2.NewEnvelopeTransformer(envelopeService, kmsName, probe.getCurrentState), Prefix: []byte(kmsTransformerPrefixV2 + kmsName + ":"), } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go index cde6a379ecd..94782ccbacd 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/controller/controller.go @@ -20,9 +20,9 @@ import ( "context" "fmt" "net/http" - "sync" "time" + "github.com/fsnotify/fsnotify" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/server/healthz" @@ -35,11 +35,8 @@ import ( // workqueueKey is the dummy key used to process change in encryption config file. const workqueueKey = "key" -// EncryptionConfigFileChangePollDuration is exposed so that integration tests can crank up the reload speed. -var EncryptionConfigFileChangePollDuration = time.Minute - -// DynamicEncryptionConfigContent which can dynamically handle changes in encryption config file. 
-type DynamicEncryptionConfigContent struct { +// DynamicKMSEncryptionConfigContent which can dynamically handle changes in encryption config file. +type DynamicKMSEncryptionConfigContent struct { name string // filePath is the path of the file to read. @@ -53,17 +50,6 @@ type DynamicEncryptionConfigContent struct { // dynamicTransformers updates the transformers when encryption config file changes. dynamicTransformers *encryptionconfig.DynamicTransformers - - // identity of the api server - apiServerID string - - // can be swapped during testing - getEncryptionConfigHash func(ctx context.Context, filepath string) (string, error) - loadEncryptionConfig func(ctx context.Context, filepath string, reload bool, apiServerID string) (*encryptionconfig.EncryptionConfiguration, error) -} - -func init() { - metrics.RegisterMetrics() } // NewDynamicEncryptionConfiguration returns controller that dynamically reacts to changes in encryption config file. @@ -71,73 +57,94 @@ func NewDynamicEncryptionConfiguration( name, filePath string, dynamicTransformers *encryptionconfig.DynamicTransformers, configContentHash string, - apiServerID string, -) *DynamicEncryptionConfigContent { - return &DynamicEncryptionConfigContent{ +) *DynamicKMSEncryptionConfigContent { + encryptionConfig := &DynamicKMSEncryptionConfigContent{ name: name, filePath: filePath, lastLoadedEncryptionConfigHash: configContentHash, dynamicTransformers: dynamicTransformers, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), - apiServerID: apiServerID, - getEncryptionConfigHash: func(_ context.Context, filepath string) (string, error) { - return encryptionconfig.GetEncryptionConfigHash(filepath) - }, - loadEncryptionConfig: encryptionconfig.LoadEncryptionConfig, } + encryptionConfig.queue.Add(workqueueKey) // to avoid missing any file changes that occur in between the initial load and Run + + return encryptionConfig } -// Run starts the controller and blocks until ctx is canceled. -func (d *DynamicEncryptionConfigContent) Run(ctx context.Context) { +// Run starts the controller and blocks until stopCh is closed. +func (d *DynamicKMSEncryptionConfigContent) Run(ctx context.Context) { defer utilruntime.HandleCrash() + defer d.queue.ShutDown() klog.InfoS("Starting controller", "name", d.name) defer klog.InfoS("Shutting down controller", "name", d.name) - var wg sync.WaitGroup + // start worker for processing content + go wait.UntilWithContext(ctx, d.runWorker, time.Second) - wg.Add(1) - go func() { - defer utilruntime.HandleCrash() - defer wg.Done() - defer d.queue.ShutDown() - <-ctx.Done() - }() + // start the loop that watches the encryption config file until stopCh is closed. + go wait.UntilWithContext(ctx, func(ctx context.Context) { + if err := d.watchEncryptionConfigFile(ctx); err != nil { + // if there is an error while setting up or handling the watches, this will ensure that we will process the config file. + defer d.queue.Add(workqueueKey) + klog.ErrorS(err, "Failed to watch encryption config file, will retry later") + } + }, time.Second) - wg.Add(1) - go func() { - defer utilruntime.HandleCrash() - defer wg.Done() - d.runWorker(ctx) - }() + <-ctx.Done() +} - // this function polls changes in the encryption config file by placing a dummy key in the queue. - // the 'runWorker' function then picks up this dummy key and processes the changes. - // the goroutine terminates when 'ctx' is canceled. 
- _ = wait.PollUntilContextCancel( - ctx, - EncryptionConfigFileChangePollDuration, - true, - func(ctx context.Context) (bool, error) { - // add dummy item to the queue to trigger file content processing. - d.queue.Add(workqueueKey) - - // return false to continue polling. - return false, nil - }, - ) +func (d *DynamicKMSEncryptionConfigContent) watchEncryptionConfigFile(ctx context.Context) error { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating fsnotify watcher: %w", err) + } + defer watcher.Close() + + if err = watcher.Add(d.filePath); err != nil { + return fmt.Errorf("error adding watch for file %s: %w", d.filePath, err) + } + + for { + select { + case event := <-watcher.Events: + if err := d.handleWatchEvent(event, watcher); err != nil { + return err + } + case err := <-watcher.Errors: + return fmt.Errorf("received fsnotify error: %w", err) + case <-ctx.Done(): + return nil + } + } +} + +func (d *DynamicKMSEncryptionConfigContent) handleWatchEvent(event fsnotify.Event, watcher *fsnotify.Watcher) error { + // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. + defer d.queue.Add(workqueueKey) + + // return if file has not been removed or renamed. + if event.Op&(fsnotify.Remove|fsnotify.Rename) == 0 { + return nil + } + + if err := watcher.Remove(d.filePath); err != nil { + klog.V(2).InfoS("Failed to remove file watch, it may have been deleted", "file", d.filePath, "err", err) + } + if err := watcher.Add(d.filePath); err != nil { + return fmt.Errorf("error adding watch for file %s: %w", d.filePath, err) + } - wg.Wait() + return nil } // runWorker to process file content -func (d *DynamicEncryptionConfigContent) runWorker(ctx context.Context) { +func (d *DynamicKMSEncryptionConfigContent) runWorker(ctx context.Context) { for d.processNextWorkItem(ctx) { } } // processNextWorkItem processes file content when there is a message in the queue. -func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool { +func (d *DynamicKMSEncryptionConfigContent) processNextWorkItem(serverCtx context.Context) bool { // key here is dummy item in the queue to trigger file content processing. key, quit := d.queue.Get() if quit { @@ -145,12 +152,6 @@ func (d *DynamicEncryptionConfigContent) processNextWorkItem(serverCtx context.C } defer d.queue.Done(key) - d.processWorkItem(serverCtx, key) - - return true -} - -func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Context, workqueueKey interface{}) { var ( updatedEffectiveConfig bool err error @@ -171,32 +172,32 @@ func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Conte } if updatedEffectiveConfig && err == nil { - metrics.RecordEncryptionConfigAutomaticReloadSuccess(d.apiServerID) + metrics.RecordEncryptionConfigAutomaticReloadSuccess() } if err != nil { - metrics.RecordEncryptionConfigAutomaticReloadFailure(d.apiServerID) + metrics.RecordEncryptionConfigAutomaticReloadFailure() utilruntime.HandleError(fmt.Errorf("error processing encryption config file %s: %v", d.filePath, err)) // add dummy item back to the queue to trigger file content processing. 
- d.queue.AddRateLimited(workqueueKey) + d.queue.AddRateLimited(key) } }() encryptionConfiguration, configChanged, err = d.processEncryptionConfig(ctx) if err != nil { - return + return true } if !configChanged { - return + return true } if len(encryptionConfiguration.HealthChecks) != 1 { err = fmt.Errorf("unexpected number of healthz checks: %d. Should have only one", len(encryptionConfiguration.HealthChecks)) - return + return true } // get healthz checks for all new KMS plugins. if err = d.validateNewTransformersHealth(ctx, encryptionConfiguration.HealthChecks[0], encryptionConfiguration.KMSCloseGracePeriod); err != nil { - return + return true } // update transformers. @@ -213,44 +214,30 @@ func (d *DynamicEncryptionConfigContent) processWorkItem(serverCtx context.Conte klog.V(2).InfoS("Loaded new kms encryption config content", "name", d.name) updatedEffectiveConfig = true + return true } // loadEncryptionConfig processes the next set of content from the file. -func (d *DynamicEncryptionConfigContent) processEncryptionConfig(ctx context.Context) ( - _ *encryptionconfig.EncryptionConfiguration, +func (d *DynamicKMSEncryptionConfigContent) processEncryptionConfig(ctx context.Context) ( + encryptionConfiguration *encryptionconfig.EncryptionConfiguration, configChanged bool, - _ error, + err error, ) { - contentHash, err := d.getEncryptionConfigHash(ctx, d.filePath) - if err != nil { - return nil, false, err - } - - // check if encryptionConfig is different from the current. Do nothing if they are the same. - if contentHash == d.lastLoadedEncryptionConfigHash { - klog.V(4).InfoS("Encryption config has not changed (before load)", "name", d.name) - return nil, false, nil - } - // this code path will only execute if reload=true. So passing true explicitly. - encryptionConfiguration, err := d.loadEncryptionConfig(ctx, d.filePath, true, d.apiServerID) + encryptionConfiguration, err = encryptionconfig.LoadEncryptionConfig(ctx, d.filePath, true) if err != nil { return nil, false, err } - // check if encryptionConfig is different from the current (again to avoid TOCTOU). Do nothing if they are the same. + // check if encryptionConfig is different from the current. Do nothing if they are the same. 
if encryptionConfiguration.EncryptionFileContentHash == d.lastLoadedEncryptionConfigHash { - klog.V(4).InfoS("Encryption config has not changed (after load)", "name", d.name) + klog.V(4).InfoS("Encryption config has not changed", "name", d.name) return nil, false, nil } - return encryptionConfiguration, true, nil } -// minKMSPluginCloseGracePeriod can be lowered in unit tests to make the health check poll faster -var minKMSPluginCloseGracePeriod = 10 * time.Second - -func (d *DynamicEncryptionConfigContent) validateNewTransformersHealth( +func (d *DynamicKMSEncryptionConfigContent) validateNewTransformersHealth( ctx context.Context, kmsPluginHealthzCheck healthz.HealthChecker, kmsPluginCloseGracePeriod time.Duration, @@ -258,8 +245,8 @@ func (d *DynamicEncryptionConfigContent) validateNewTransformersHealth( // test if new transformers are healthy var healthCheckError error - if kmsPluginCloseGracePeriod < minKMSPluginCloseGracePeriod { - kmsPluginCloseGracePeriod = minKMSPluginCloseGracePeriod + if kmsPluginCloseGracePeriod < 10*time.Second { + kmsPluginCloseGracePeriod = 10 * time.Second } // really make sure that the immediate check does not hang diff --git a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go index 70414035fed..799b584cf7a 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/encryptionconfig/metrics/metrics.go @@ -17,9 +17,6 @@ limitations under the License. package metrics import ( - "crypto/sha256" - "fmt" - "hash" "sync" "k8s.io/component-base/metrics" @@ -32,26 +29,24 @@ const ( ) var ( - encryptionConfigAutomaticReloadFailureTotal = metrics.NewCounterVec( + encryptionConfigAutomaticReloadFailureTotal = metrics.NewCounter( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_failures_total", - Help: "Total number of failed automatic reloads of encryption configuration split by apiserver identity.", + Help: "Total number of failed automatic reloads of encryption configuration.", StabilityLevel: metrics.ALPHA, }, - []string{"apiserver_id_hash"}, ) - encryptionConfigAutomaticReloadSuccessTotal = metrics.NewCounterVec( + encryptionConfigAutomaticReloadSuccessTotal = metrics.NewCounter( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_success_total", - Help: "Total number of successful automatic reloads of encryption configuration split by apiserver identity.", + Help: "Total number of successful automatic reloads of encryption configuration.", StabilityLevel: metrics.ALPHA, }, - []string{"apiserver_id_hash"}, ) encryptionConfigAutomaticReloadLastTimestampSeconds = metrics.NewGaugeVec( @@ -59,53 +54,33 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "automatic_reload_last_timestamp_seconds", - Help: "Timestamp of the last successful or failed automatic reload of encryption configuration split by apiserver identity.", + Help: "Timestamp of the last successful or failed automatic reload of encryption configuration.", StabilityLevel: metrics.ALPHA, }, - []string{"status", "apiserver_id_hash"}, + []string{"status"}, ) ) var registerMetrics sync.Once -var hashPool *sync.Pool func RegisterMetrics() { registerMetrics.Do(func() { - hashPool = &sync.Pool{ - New: func() interface{} { - return sha256.New() - }, - } legacyregistry.MustRegister(encryptionConfigAutomaticReloadFailureTotal) 
legacyregistry.MustRegister(encryptionConfigAutomaticReloadSuccessTotal) legacyregistry.MustRegister(encryptionConfigAutomaticReloadLastTimestampSeconds) }) } -func RecordEncryptionConfigAutomaticReloadFailure(apiServerID string) { - apiServerIDHash := getHash(apiServerID) - encryptionConfigAutomaticReloadFailureTotal.WithLabelValues(apiServerIDHash).Inc() - recordEncryptionConfigAutomaticReloadTimestamp("failure", apiServerIDHash) +func RecordEncryptionConfigAutomaticReloadFailure() { + encryptionConfigAutomaticReloadFailureTotal.Inc() + recordEncryptionConfigAutomaticReloadTimestamp("failure") } -func RecordEncryptionConfigAutomaticReloadSuccess(apiServerID string) { - apiServerIDHash := getHash(apiServerID) - encryptionConfigAutomaticReloadSuccessTotal.WithLabelValues(apiServerIDHash).Inc() - recordEncryptionConfigAutomaticReloadTimestamp("success", apiServerIDHash) +func RecordEncryptionConfigAutomaticReloadSuccess() { + encryptionConfigAutomaticReloadSuccessTotal.Inc() + recordEncryptionConfigAutomaticReloadTimestamp("success") } -func recordEncryptionConfigAutomaticReloadTimestamp(result, apiServerIDHash string) { - encryptionConfigAutomaticReloadLastTimestampSeconds.WithLabelValues(result, apiServerIDHash).SetToCurrentTime() -} - -func getHash(data string) string { - if len(data) == 0 { - return "" - } - h := hashPool.Get().(hash.Hash) - h.Reset() - h.Write([]byte(data)) - dataHash := fmt.Sprintf("sha256:%x", h.Sum(nil)) - hashPool.Put(h) - return dataHash +func recordEncryptionConfigAutomaticReloadTimestamp(result string) { + encryptionConfigAutomaticReloadLastTimestampSeconds.WithLabelValues(result).SetToCurrentTime() } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go index a1fc3168c5d..57e9c1a9f13 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/etcd.go @@ -26,7 +26,6 @@ import ( "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -45,6 +44,8 @@ import ( ) type EtcdOptions struct { + // The value of Paging on StorageConfig will be overridden by the + // calculated feature gate value. 
StorageConfig storagebackend.Config EncryptionProviderConfigFilepath string EncryptionProviderConfigAutomaticReload bool @@ -86,12 +87,6 @@ func NewEtcdOptions(backendConfig *storagebackend.Config) *EtcdOptions { return options } -var storageMediaTypes = sets.New( - runtime.ContentTypeJSON, - runtime.ContentTypeYAML, - runtime.ContentTypeProtobuf, -) - func (s *EtcdOptions) Validate() []error { if s == nil { return nil @@ -125,10 +120,6 @@ func (s *EtcdOptions) Validate() []error { allErrors = append(allErrors, fmt.Errorf("--encryption-provider-config-automatic-reload must be set with --encryption-provider-config")) } - if s.DefaultStorageMediaType != "" && !storageMediaTypes.Has(s.DefaultStorageMediaType) { - allErrors = append(allErrors, fmt.Errorf("--storage-media-type %q invalid, allowed values: %s", s.DefaultStorageMediaType, strings.Join(sets.List(storageMediaTypes), ", "))) - } - return allErrors } @@ -303,7 +294,7 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro } }() - encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload, c.APIServerID) + encryptionConfiguration, err := encryptionconfig.LoadEncryptionConfig(ctxTransformers, s.EncryptionProviderConfigFilepath, s.EncryptionProviderConfigAutomaticReload) if err != nil { return err } @@ -327,7 +318,6 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro s.EncryptionProviderConfigFilepath, dynamicTransformers, encryptionConfiguration.EncryptionFileContentHash, - c.APIServerID, ) go dynamicEncryptionConfigController.Run(ctxServer) @@ -341,23 +331,18 @@ func (s *EtcdOptions) maybeApplyResourceTransformers(c *server.Config) (err erro c.ResourceTransformers = dynamicTransformers if !s.SkipHealthEndpoints { - addHealthChecksWithoutLivez(c, dynamicTransformers) + c.AddHealthChecks(dynamicTransformers) } } else { c.ResourceTransformers = encryptionconfig.StaticTransformers(encryptionConfiguration.Transformers) if !s.SkipHealthEndpoints { - addHealthChecksWithoutLivez(c, encryptionConfiguration.HealthChecks...) + c.AddHealthChecks(encryptionConfiguration.HealthChecks...) } } return nil } -func addHealthChecksWithoutLivez(c *server.Config, healthChecks ...healthz.HealthChecker) { - c.HealthzChecks = append(c.HealthzChecks, healthChecks...) - c.ReadyzChecks = append(c.ReadyzChecks, healthChecks...) -} - func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { healthCheck, err := storagefactory.CreateHealthCheck(s.StorageConfig, c.DrainedNotify()) if err != nil { diff --git a/vendor/k8s.io/apiserver/pkg/server/options/feature.go b/vendor/k8s.io/apiserver/pkg/server/options/feature.go index f01195560a4..35596fba692 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/feature.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/feature.go @@ -17,22 +17,16 @@ limitations under the License. 
package options import ( - "fmt" - "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apiserver/pkg/server" - utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" ) type FeatureOptions struct { EnableProfiling bool DebugSocketPath string EnableContentionProfiling bool - EnablePriorityAndFairness bool } func NewFeatureOptions() *FeatureOptions { @@ -42,7 +36,6 @@ func NewFeatureOptions() *FeatureOptions { EnableProfiling: defaults.EnableProfiling, DebugSocketPath: defaults.DebugSocketPath, EnableContentionProfiling: defaults.EnableContentionProfiling, - EnablePriorityAndFairness: true, } } @@ -57,11 +50,9 @@ func (o *FeatureOptions) AddFlags(fs *pflag.FlagSet) { "Enable block profiling, if profiling is enabled") fs.StringVar(&o.DebugSocketPath, "debug-socket-path", o.DebugSocketPath, "Use an unprotected (no authn/authz) unix-domain socket for profiling with the given path") - fs.BoolVar(&o.EnablePriorityAndFairness, "enable-priority-and-fairness", o.EnablePriorityAndFairness, ""+ - "If true, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") } -func (o *FeatureOptions) ApplyTo(c *server.Config, clientset kubernetes.Interface, informers informers.SharedInformerFactory) error { +func (o *FeatureOptions) ApplyTo(c *server.Config) error { if o == nil { return nil } @@ -70,18 +61,6 @@ func (o *FeatureOptions) ApplyTo(c *server.Config, clientset kubernetes.Interfac c.DebugSocketPath = o.DebugSocketPath c.EnableContentionProfiling = o.EnableContentionProfiling - if o.EnablePriorityAndFairness { - if c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight <= 0 { - return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight) - - } - c.FlowControl = utilflowcontrol.New( - informers, - clientset.FlowcontrolV1(), - c.MaxRequestsInFlight+c.MaxMutatingRequestsInFlight, - ) - } - return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index eb7e67b3670..5d031e202e0 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -17,15 +17,20 @@ limitations under the License. package options import ( + "fmt" + "github.com/spf13/pflag" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/server" "k8s.io/apiserver/pkg/storage/storagebackend" "k8s.io/apiserver/pkg/util/feature" + utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/component-base/featuregate" + "k8s.io/klog/v2" ) // RecommendedOptions contains the recommended options for running an API server. 
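With the revert above, FeatureOptions loses its EnablePriorityAndFairness field and ApplyTo once again takes only the server config; the priority-and-fairness wiring moves into RecommendedOptions.ApplyTo in the next hunk, gated on the APIPriorityAndFairness feature gate. A minimal usage sketch under the assumption of this vendored API; the zero-value Config is used purely for illustration.

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/options"
)

func main() {
	o := options.NewFeatureOptions()
	o.EnableProfiling = true
	o.EnableContentionProfiling = true

	// Post-revert ApplyTo only copies profiling/debug settings; flow control is
	// constructed elsewhere (RecommendedOptions.ApplyTo) when the gate is enabled.
	c := &server.Config{} // zero-value Config for illustration only
	if err := o.ApplyTo(c); err != nil {
		panic(err)
	}
	fmt.Println("profiling enabled:", c.EnableProfiling)
}
```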
@@ -117,17 +122,17 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { if err := o.Audit.ApplyTo(&config.Config); err != nil { return err } - if err := o.CoreAPI.ApplyTo(config); err != nil { + if err := o.Features.ApplyTo(&config.Config); err != nil { return err } - kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) - if err != nil { + if err := o.CoreAPI.ApplyTo(config); err != nil { return err } - if err := o.Features.ApplyTo(&config.Config, kubeClient, config.SharedInformerFactory); err != nil { + initializers, err := o.ExtraAdmissionInitializers(config) + if err != nil { return err } - initializers, err := o.ExtraAdmissionInitializers(config) + kubeClient, err := kubernetes.NewForConfig(config.ClientConfig) if err != nil { return err } @@ -139,6 +144,21 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { initializers...); err != nil { return err } + if feature.DefaultFeatureGate.Enabled(features.APIPriorityAndFairness) { + if config.ClientConfig != nil { + if config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight <= 0 { + return fmt.Errorf("invalid configuration: MaxRequestsInFlight=%d and MaxMutatingRequestsInFlight=%d; they must add up to something positive", config.MaxRequestsInFlight, config.MaxMutatingRequestsInFlight) + + } + config.FlowControl = utilflowcontrol.New( + config.SharedInformerFactory, + kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(), + config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, + ) + } else { + klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") + } + } return nil } diff --git a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go index 1373d8a4d73..f9d574d5d2c 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go @@ -62,7 +62,8 @@ type ServerRunOptions struct { // decoded in a write request. 0 means no limit. // We intentionally did not add a flag for this option. Users of the // apiserver library can wire it to a flag. - MaxRequestBodyBytes int64 + MaxRequestBodyBytes int64 + EnablePriorityAndFairness bool // ShutdownSendRetryAfter dictates when to initiate shutdown of the HTTP // Server during the graceful termination of the apiserver. If true, we wait @@ -103,6 +104,7 @@ func NewServerRunOptions() *ServerRunOptions { ShutdownWatchTerminationGracePeriod: defaults.ShutdownWatchTerminationGracePeriod, JSONPatchMaxCopyBytes: defaults.JSONPatchMaxCopyBytes, MaxRequestBodyBytes: defaults.MaxRequestBodyBytes, + EnablePriorityAndFairness: true, ShutdownSendRetryAfter: false, } } @@ -323,6 +325,9 @@ func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) { "handler, which picks a randomized value above this number as the connection timeout, "+ "to spread out load.") + fs.BoolVar(&s.EnablePriorityAndFairness, "enable-priority-and-fairness", s.EnablePriorityAndFairness, ""+ + "If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness") + fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+ "Time to delay the termination. During that time the server keeps serving requests normally. 
The endpoints /healthz and /livez "+ "will return success, but /readyz immediately returns failure. Graceful termination starts after this delay "+ diff --git a/vendor/k8s.io/apiserver/pkg/server/options/serving.go b/vendor/k8s.io/apiserver/pkg/server/options/serving.go index 842ab7ee0d1..efda02ef7c9 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/serving.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/serving.go @@ -260,39 +260,7 @@ func (s *SecureServingOptions) ApplyTo(config **server.SecureServingInfo) error c := *config serverCertFile, serverKeyFile := s.ServerCert.CertKey.CertFile, s.ServerCert.CertKey.KeyFile - // load main cert *original description until 2023-08-18* - - /* - kubernetes mutual (2-way) x509 between client and apiserver: - - >1. apiserver sending its apiserver certificate along with its publickey to client - 2. client verifies the apiserver certificate sent against its cluster certificate authority data - 3. client sending its client certificate along with its public key to the apiserver - 4. apiserver verifies the client certificate sent against its cluster certificate authority data - - description: - here, with this block, - apiserver certificate and pub key data (along with priv key)get loaded into server.SecureServingInfo - for client to later in the step 2 verify the apiserver certificate during the handshake - when making a request - - normal args related to this stage: - --tls-cert-file string File containing the default x509 Certificate for HTTPS. - (CA cert, if any, concatenated after server cert). If HTTPS serving is enabled, and - --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate - and key are generated for the public address and saved to the directory specified by - --cert-dir - --tls-private-key-file string File containing the default x509 private key matching --tls-cert-file. - - (retrievable from "kube-apiserver --help" command) - (suggested by @deads2k) - - see also: - - for the step 2, see: staging/src/k8s.io/client-go/transport/transport.go - - for the step 3, see: staging/src/k8s.io/client-go/transport/transport.go - - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go - */ - + // load main cert if len(serverCertFile) != 0 || len(serverKeyFile) != 0 { var err error c.Cert, err = dynamiccertificates.NewDynamicServingContentFromFiles("serving-cert", serverCertFile, serverKeyFile) diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go index 8fd4d559999..ad1eb2835ef 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/metrics.go @@ -17,7 +17,6 @@ limitations under the License. package routes import ( - handlersmetrics "k8s.io/apiserver/pkg/endpoints/handlers/metrics" apimetrics "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/server/mux" cachermetrics "k8s.io/apiserver/pkg/storage/cacher/metrics" @@ -53,5 +52,4 @@ func register() { etcd3metrics.Register() flowcontrolmetrics.Register() peerproxymetrics.Register() - handlersmetrics.Register() } diff --git a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go index 12c8b1ad910..2819d157601 100644 --- a/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go +++ b/vendor/k8s.io/apiserver/pkg/server/routes/openapi.go @@ -32,8 +32,7 @@ import ( // OpenAPI installs spec endpoints for each web service. 
type OpenAPI struct { - Config *common.Config - V3Config *common.OpenAPIV3Config + Config *common.Config } // Install adds the SwaggerUI webservice to the given mux. @@ -66,7 +65,7 @@ func (oa OpenAPI) InstallV3(c *restful.Container, mux *mux.PathRecorderMux) *han } for gv, ws := range grouped { - spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.V3Config) + spec, err := builder3.BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(ws), oa.Config) if err != nil { klog.Errorf("Failed to build OpenAPI v3 for group %s, %q", gv, err) diff --git a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go index 0dc50cea61d..be4d0390d60 100644 --- a/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go +++ b/vendor/k8s.io/apiserver/pkg/server/storage/storage_factory.go @@ -19,13 +19,15 @@ package storage import ( "crypto/tls" "crypto/x509" - "os" + "io/ioutil" "strings" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage/storagebackend" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" ) @@ -112,6 +114,8 @@ type groupResourceOverrides struct { // decoderDecoratorFn is optional and may wrap the provided decoders (can add new decoders). The order of // returned decoders will be priority for attempt to decode. decoderDecoratorFn func([]runtime.Decoder) []runtime.Decoder + // disablePaging will prevent paging on the provided resource. + disablePaging bool } // Apply overrides the provided config and options if the override has a value in that position @@ -135,6 +139,9 @@ func (o groupResourceOverrides) Apply(config *storagebackend.Config, options *St if o.decoderDecoratorFn != nil { options.DecoderDecoratorFn = o.decoderDecoratorFn } + if o.disablePaging { + config.Paging = false + } } var _ StorageFactory = &DefaultStorageFactory{} @@ -149,6 +156,7 @@ func NewDefaultStorageFactory( resourceConfig APIResourceConfigSource, specialDefaultResourcePrefixes map[schema.GroupResource]string, ) *DefaultStorageFactory { + config.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) if len(defaultMediaType) == 0 { defaultMediaType = runtime.ContentTypeJSON } @@ -177,6 +185,14 @@ func (s *DefaultStorageFactory) SetEtcdPrefix(groupResource schema.GroupResource s.Overrides[groupResource] = overrides } +// SetDisableAPIListChunking allows a specific resource to disable paging at the storage layer, to prevent +// exposure of key names in continuations. This may be overridden by feature gates. +func (s *DefaultStorageFactory) SetDisableAPIListChunking(groupResource schema.GroupResource) { + overrides := s.Overrides[groupResource] + overrides.disablePaging = true + s.Overrides[groupResource] = overrides +} + // SetResourceEtcdPrefix sets the prefix for a resource, but not the base-dir. You'll end up in `etcdPrefix/resourceEtcdPrefix`. 
func (s *DefaultStorageFactory) SetResourceEtcdPrefix(groupResource schema.GroupResource, prefix string) { overrides := s.Overrides[groupResource] @@ -321,7 +337,7 @@ func backends(storageConfig storagebackend.Config, grOverrides map[schema.GroupR } } if len(storageConfig.Transport.TrustedCAFile) > 0 { - if caCert, err := os.ReadFile(storageConfig.Transport.TrustedCAFile); err != nil { + if caCert, err := ioutil.ReadFile(storageConfig.Transport.TrustedCAFile); err != nil { klog.Errorf("failed to read ca file while getting backends: %s", err) } else { caPool := x509.NewCertPool() diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go index 595fd5036d0..478d2151d9c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cache_watcher.go @@ -22,6 +22,7 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -176,6 +177,7 @@ func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool { // This means that we couldn't send event to that watcher. // Since we don't want to block on it infinitely, // we simply terminate it. + klog.V(1).Infof("Forcing %v watcher close due to unresponsiveness: %v. len(c.input) = %v, len(c.result) = %v", c.groupResource.String(), c.identifier, len(c.input), len(c.result)) metrics.TerminatedWatchersCounter.WithLabelValues(c.groupResource.String()).Inc() // This means that we couldn't send event to that watcher. // Since we don't want to block on it infinitely, we simply terminate it. @@ -363,10 +365,17 @@ func (c *cacheWatcher) convertToWatchEvent(event *watchCacheEvent) *watch.Event if event.Type == watch.Bookmark { e := &watch.Event{Type: watch.Bookmark, Object: event.Object.DeepCopyObject()} if !c.wasBookmarkAfterRvSent() { - if err := storage.AnnotateInitialEventsEndBookmark(e.Object); err != nil { + objMeta, err := meta.Accessor(e.Object) + if err != nil { utilruntime.HandleError(fmt.Errorf("error while accessing object's metadata gr: %v, identifier: %v, obj: %#v, err: %v", c.groupResource, c.identifier, e.Object, err)) return nil } + objAnnotations := objMeta.GetAnnotations() + if objAnnotations == nil { + objAnnotations = map[string]string{} + } + objAnnotations["k8s.io/initial-events-end"] = "true" + objMeta.SetAnnotations(objAnnotations) } return e } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 900f300cd5f..e34fb498630 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" "reflect" + "strconv" "sync" "time" @@ -113,8 +114,11 @@ func (wm watchersMap) addWatcher(w *cacheWatcher, number int) { wm[number] = w } -func (wm watchersMap) deleteWatcher(number int) { - delete(wm, number) +func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) { + if watcher, ok := wm[number]; ok { + delete(wm, number) + done(watcher) + } } func (wm watchersMap) terminateAll(done func(*cacheWatcher)) { @@ -145,14 +149,14 @@ func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, scope namespac } } -func (i *indexedWatchers) deleteWatcher(number int, scope namespacedName, value string, supported bool) { +func (i *indexedWatchers) deleteWatcher(number int, scope 
namespacedName, value string, supported bool, done func(*cacheWatcher)) { if supported { - i.valueWatchers[value].deleteWatcher(number) + i.valueWatchers[value].deleteWatcher(number, done) if len(i.valueWatchers[value]) == 0 { delete(i.valueWatchers, value) } } else { - i.allWatchers[scope].deleteWatcher(number) + i.allWatchers[scope].deleteWatcher(number, done) if len(i.allWatchers[scope]) == 0 { delete(i.allWatchers, scope) } @@ -730,14 +734,15 @@ func shouldDelegateList(opts storage.ListOptions) bool { resourceVersion := opts.ResourceVersion pred := opts.Predicate match := opts.ResourceVersionMatch + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) // Serve consistent reads from storage if ConsistentListFromCache is disabled consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled // Watch cache doesn't support continuations, so serve them from etcd. - hasContinuation := len(pred.Continue) > 0 + hasContinuation := pagingEnabled && len(pred.Continue) > 0 // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := pred.Limit > 0 && resourceVersion != "0" + hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan @@ -777,7 +782,7 @@ func (c *Cacher) GetList(ctx context.Context, key string, opts storage.ListOptio return c.storage.GetList(ctx, key, opts, listObj) } if listRV == 0 && utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) { - listRV, err = storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String()) + listRV, err = c.getCurrentResourceVersionFromStorage(ctx) if err != nil { return err } @@ -1229,8 +1234,7 @@ func forgetWatcher(c *Cacher, w *cacheWatcher, index int, scope namespacedName, // It's possible that the watcher is already not in the structure (e.g. in case of // simultaneous Stop() and terminateAllWatchers(), but it is safe to call stopLocked() // on a watcher multiple times. - c.watchers.deleteWatcher(index, scope, triggerValue, triggerSupported) - c.stopWatcherLocked(w) + c.watchers.deleteWatcher(index, scope, triggerValue, triggerSupported, c.stopWatcherLocked) } } @@ -1254,12 +1258,48 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) { return c.versioner.ParseResourceVersion(resourceVersion) } +// getCurrentResourceVersionFromStorage gets the current resource version from the underlying storage engine. 
+// this method issues an empty list request and reads only the ResourceVersion from the object metadata +func (c *Cacher) getCurrentResourceVersionFromStorage(ctx context.Context) (uint64, error) { + if c.newListFunc == nil { + return 0, fmt.Errorf("newListFunction wasn't provided for %v", c.objectType) + } + emptyList := c.newListFunc() + pred := storage.SelectionPredicate{ + Label: labels.Everything(), + Field: fields.Everything(), + Limit: 1, // just in case we actually hit something + } + + err := c.storage.GetList(ctx, c.resourcePrefix, storage.ListOptions{Predicate: pred}, emptyList) + if err != nil { + return 0, err + } + emptyListAccessor, err := meta.ListAccessor(emptyList) + if err != nil { + return 0, err + } + if emptyListAccessor == nil { + return 0, fmt.Errorf("unable to extract a list accessor from %T", emptyList) + } + + currentResourceVersion, err := strconv.Atoi(emptyListAccessor.GetResourceVersion()) + if err != nil { + return 0, err + } + + if currentResourceVersion == 0 { + return 0, fmt.Errorf("the current resource version must be greater than 0") + } + return uint64(currentResourceVersion), nil +} + // getBookmarkAfterResourceVersionLockedFunc returns a function that // spits a ResourceVersion after which the bookmark event will be delivered. // // The returned function must be called under the watchCache lock. func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, parsedResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks { + if opts.SendInitialEvents == nil || *opts.SendInitialEvents == false || !opts.Predicate.AllowWatchBookmarks { return func() uint64 { return 0 }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedResourceVersion, opts) @@ -1274,7 +1314,7 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, // // The returned function must be called under the watchCache lock. func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents { + if opts.SendInitialEvents == nil || *opts.SendInitialEvents == true { return func() uint64 { return parsedWatchResourceVersion }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedWatchResourceVersion, opts) @@ -1287,7 +1327,7 @@ func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { switch { case len(opts.ResourceVersion) == 0: - rv, err := storage.GetCurrentResourceVersionFromStorage(ctx, c.storage, c.newListFunc, c.resourcePrefix, c.objectType.String()) + rv, err := c.getCurrentResourceVersionFromStorage(ctx) if err != nil { return nil, err } @@ -1305,7 +1345,7 @@ func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedW // Additionally, it instructs the caller whether it should ask for // all events from the cache (full state) or not. 
func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) (bool, error) { - if opts.SendInitialEvents != nil && *opts.SendInitialEvents { + if opts.SendInitialEvents != nil && *opts.SendInitialEvents == true { err := c.watchCache.waitUntilFreshAndBlock(ctx, requestedWatchRV) defer c.watchCache.RUnlock() return err == nil, err diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go index 2b57dd16509..c455357e04d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache_interval.go @@ -18,7 +18,6 @@ package cacher import ( "fmt" - "sort" "sync" "k8s.io/apimachinery/pkg/fields" @@ -115,24 +114,9 @@ func newCacheInterval(startIndex, endIndex int, indexer indexerFunc, indexValida } } -type sortableWatchCacheEvents []*watchCacheEvent - -func (s sortableWatchCacheEvents) Len() int { - return len(s) -} - -func (s sortableWatchCacheEvents) Less(i, j int) bool { - return s[i].Key < s[j].Key -} - -func (s sortableWatchCacheEvents) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - // newCacheIntervalFromStore is meant to handle the case of rv=0, such that the events // returned by Next() need to be events from a List() done on the underlying store of // the watch cache. -// The items returned in the interval will be sorted by Key. func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getAttrsFunc attrFunc) (*watchCacheInterval, error) { buffer := &watchCacheIntervalBuffer{} allItems := store.List() @@ -156,7 +140,6 @@ func newCacheIntervalFromStore(resourceVersion uint64, store cache.Indexer, getA } buffer.endIndex++ } - sort.Sort(sortableWatchCacheEvents(buffer.buffer)) ci := &watchCacheInterval{ startIndex: 0, // Simulate that we already have all the events we're looking for. diff --git a/vendor/k8s.io/apiserver/pkg/storage/errors.go b/vendor/k8s.io/apiserver/pkg/storage/errors.go index 5f29097c59c..ed4f4d0d0e8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/errors.go +++ b/vendor/k8s.io/apiserver/pkg/storage/errors.go @@ -17,16 +17,13 @@ limitations under the License. package storage import ( - "errors" "fmt" - apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" ) -var ErrResourceVersionSetOnCreate = errors.New("resourceVersion should not be set on objects to be created") - const ( ErrCodeKeyNotFound int = iota + 1 ErrCodeKeyExists @@ -179,7 +176,7 @@ var tooLargeResourceVersionCauseMsg = "Too large resource version" // NewTooLargeResourceVersionError returns a timeout error with the given retrySeconds for a request for // a minimum resource version that is larger than the largest currently available resource version for a requested resource. 
func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error { - err := apierrors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) + err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds) err.ErrStatus.Details.Causes = []metav1.StatusCause{ { Type: metav1.CauseTypeResourceVersionTooLarge, @@ -191,8 +188,8 @@ func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uin // IsTooLargeResourceVersion returns true if the error is a TooLargeResourceVersion error. func IsTooLargeResourceVersion(err error) bool { - if !apierrors.IsTimeout(err) { + if !errors.IsTimeout(err) { return false } - return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) + return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go index e7644ddfae6..3e5bfb1c633 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/event.go @@ -30,17 +30,6 @@ type event struct { isDeleted bool isCreated bool isProgressNotify bool - // isInitialEventsEndBookmark helps us keep track - // of whether we have sent an annotated bookmark event. - // - // when this variable is set to true, - // a special annotation will be added - // to the bookmark event. - // - // note that we decided to extend the event - // struct field to eliminate contention - // between startWatching and processEvent - isInitialEventsEndBookmark bool } // parseKV converts a KeyValue retrieved from an initial sync() listing to a synthetic isCreated event. diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index fadc87d53de..ac023d55d8c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -69,7 +69,7 @@ var ( objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_storage_objects", - Help: "Number of stored objects at the time of last check split by kind. In case of a fetching error, the value will be -1.", + Help: "Number of stored objects at the time of last check split by kind.", StabilityLevel: compbasemetrics.STABLE, }, []string{"resource"}, @@ -228,7 +228,7 @@ func UpdateEtcdDbSize(ep string, size int64) { // SetStorageMonitorGetter sets monitor getter to allow monitoring etcd stats. func SetStorageMonitorGetter(getter func() ([]Monitor, error)) { - storageMonitor.setGetter(getter) + storageMonitor.monitorGetter = getter } // UpdateLeaseObjectCount sets the etcd_lease_object_counts metric. 
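The metrics.go hunks above and below replace the mutex-guarded accessors around `monitorCollector.monitorGetter` with a plain field write (`storageMonitor.monitorGetter = getter`) and a direct call from `CollectWithStability`. For readers comparing the two revisions, here is a minimal, self-contained sketch of the guarded pattern being removed; `collector` and its methods are illustrative stand-ins, not the vendored type:

```go
package main

import (
	"fmt"
	"sync"
)

// collector is a toy stand-in for a metrics collector whose data source
// can be swapped while another goroutine is collecting.
type collector struct {
	mu     sync.Mutex
	getter func() ([]string, error)
}

// setGetter installs the data source under the lock, so a concurrent
// collect never races with the assignment.
func (c *collector) setGetter(g func() ([]string, error)) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.getter = g
}

// getGetter reads the current data source under the same lock.
func (c *collector) getGetter() func() ([]string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.getter
}

// collect grabs the getter once under the lock, then calls it outside
// the lock, mirroring the getGetter()() call shape in the removed code.
func (c *collector) collect() {
	g := c.getGetter()
	if g == nil {
		return
	}
	items, err := g()
	if err != nil {
		return
	}
	fmt.Println(items)
}

func main() {
	c := &collector{}
	c.setGetter(func() ([]string, error) { return []string{"etcd-0"}, nil })
	c.collect()
}
```

The revision kept by this diff assigns the field directly, which is safe as long as `SetStorageMonitorGetter` runs before any metrics scrape; the sketch above shows what the deleted lock bought when that ordering is not guaranteed.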
@@ -258,22 +258,9 @@ type StorageMetrics struct { type monitorCollector struct { compbasemetrics.BaseStableCollector - mutex sync.Mutex monitorGetter func() ([]Monitor, error) } -func (m *monitorCollector) setGetter(monitorGetter func() ([]Monitor, error)) { - m.mutex.Lock() - defer m.mutex.Unlock() - m.monitorGetter = monitorGetter -} - -func (m *monitorCollector) getGetter() func() ([]Monitor, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - return m.monitorGetter -} - // DescribeWithStability implements compbasemetrics.StableColletor func (c *monitorCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc) { ch <- storageSizeDescription @@ -281,7 +268,7 @@ func (c *monitorCollector) DescribeWithStability(ch chan<- *compbasemetrics.Desc // CollectWithStability implements compbasemetrics.StableColletor func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric) { - monitors, err := c.getGetter()() + monitors, err := c.monitorGetter() if err != nil { return } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index 1e5e40d6235..7374152239c 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -32,15 +32,19 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/audit" + "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/tracing" "k8s.io/klog/v2" ) @@ -77,6 +81,7 @@ type store struct { groupResource schema.GroupResource groupResourceString string watcher *watcher + pagingEnabled bool leaseManager *leaseManager } @@ -95,11 +100,11 @@ type objState struct { } // New returns an etcd3 implementation of storage.Interface. 
-func New(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) storage.Interface { - return newStore(c, codec, newFunc, newListFunc, prefix, resourcePrefix, groupResource, transformer, leaseManagerConfig) +func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) storage.Interface { + return newStore(c, codec, newFunc, prefix, groupResource, transformer, pagingEnabled, leaseManagerConfig) } -func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func() runtime.Object, prefix, resourcePrefix string, groupResource schema.GroupResource, transformer value.Transformer, leaseManagerConfig LeaseManagerConfig) *store { +func newStore(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, groupResource schema.GroupResource, transformer value.Transformer, pagingEnabled bool, leaseManagerConfig LeaseManagerConfig) *store { versioner := storage.APIObjectVersioner{} // for compatibility with etcd2 impl. // no-op for default prefix of '/registry'. @@ -109,36 +114,19 @@ func newStore(c *clientv3.Client, codec runtime.Codec, newFunc, newListFunc func // Ensure the pathPrefix ends in "/" here to simplify key concatenation later. pathPrefix += "/" } - - w := &watcher{ - client: c, - codec: codec, - newFunc: newFunc, - groupResource: groupResource, - versioner: versioner, - transformer: transformer, - } - if newFunc == nil { - w.objectType = "" - } else { - w.objectType = reflect.TypeOf(newFunc()).String() - } - s := &store{ + result := &store{ client: c, codec: codec, versioner: versioner, transformer: transformer, + pagingEnabled: pagingEnabled, pathPrefix: pathPrefix, groupResource: groupResource, groupResourceString: groupResource.String(), - watcher: w, + watcher: newWatcher(c, codec, groupResource, newFunc, versioner), leaseManager: newDefaultLeaseManager(c, leaseManagerConfig), } - - w.getCurrentStorageRV = func(ctx context.Context) (uint64, error) { - return storage.GetCurrentResourceVersionFromStorage(ctx, s, newListFunc, resourcePrefix, w.objectType) - } - return s + return result } // Versioner implements storage.Interface.Versioner. 
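The store.go hunk above reintroduces a `pagingEnabled` flag on the etcd3 store, and the `GetList` changes that follow use it to gate limited, resumable range reads whose continue key is the last returned key plus a `"\x00"` suffix. As a rough, standard-library-only illustration of that limit/continue shape (an in-memory stand-in with a hypothetical `page` helper, not the etcd3 client code):

```go
package main

import (
	"fmt"
	"sort"
)

// page returns up to limit keys at or after fromKey, plus the key to resume
// from on the next call ("" when nothing is left). Appending "\x00" mirrors
// how the storage layer starts the next read immediately after the last
// returned key; keys must already be sorted, as an etcd range read would
// return them.
func page(keys []string, fromKey string, limit int) (items []string, next string) {
	start := sort.SearchStrings(keys, fromKey)
	end := start + limit
	if end >= len(keys) {
		return keys[start:], ""
	}
	return keys[start:end], keys[end-1] + "\x00"
}

func main() {
	keys := []string{"/registry/pods/ns/a", "/registry/pods/ns/b", "/registry/pods/ns/c"}
	fromKey := ""
	for {
		items, next := page(keys, fromKey, 2)
		fmt.Println(items)
		if next == "" {
			break
		}
		fromKey = next
	}
}
```

When `pagingEnabled` is false (the `APIListChunking` gate off), the limit is never applied and the whole range comes back in one read, which is exactly the branch the `s.pagingEnabled && pred.Limit > 0` checks below select between.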
@@ -197,7 +185,7 @@ func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ) defer span.End(500 * time.Millisecond) if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 { - return storage.ErrResourceVersionSetOnCreate + return errors.New("resourceVersion should not be set on objects to be created") } if err := s.versioner.PrepareObjectForStorage(obj); err != nil { return fmt.Errorf("PrepareObjectForStorage failed: %v", err) @@ -270,7 +258,15 @@ func (s *store) Delete( func (s *store) conditionalDelete( ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions, validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error { - getCurrentState := s.getCurrentState(ctx, key, v, false) + getCurrentState := func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, key) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) + if err != nil { + return nil, err + } + return s.getState(ctx, getResp, key, v, false) + } var origState *objState var err error @@ -398,7 +394,15 @@ func (s *store) GuaranteedUpdate( return fmt.Errorf("unable to convert output object to pointer: %v", err) } - getCurrentState := s.getCurrentState(ctx, preparedKey, v, ignoreNotFound) + getCurrentState := func() (*objState, error) { + startTime := time.Now() + getResp, err := s.client.KV.Get(ctx, preparedKey) + metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) + if err != nil { + return nil, err + } + return s.getState(ctx, getResp, preparedKey, v, ignoreNotFound) + } var origState *objState var origStateIsCurrent bool @@ -590,13 +594,17 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if err != nil { return err } - ctx, span := tracing.Start(ctx, fmt.Sprintf("List(recursive=%v) etcd3", opts.Recursive), + recursive := opts.Recursive + resourceVersion := opts.ResourceVersion + match := opts.ResourceVersionMatch + pred := opts.Predicate + ctx, span := tracing.Start(ctx, fmt.Sprintf("List(recursive=%v) etcd3", recursive), attribute.String("audit-id", audit.GetAuditIDTruncated(ctx)), attribute.String("key", key), - attribute.String("resourceVersion", opts.ResourceVersion), - attribute.String("resourceVersionMatch", string(opts.ResourceVersionMatch)), - attribute.Int("limit", int(opts.Predicate.Limit)), - attribute.String("continue", opts.Predicate.Continue)) + attribute.String("resourceVersion", resourceVersion), + attribute.String("resourceVersionMatch", string(match)), + attribute.Int("limit", int(pred.Limit)), + attribute.String("continue", pred.Continue)) defer span.End(500 * time.Millisecond) listPtr, err := meta.GetItemsPtr(listObj) if err != nil { @@ -611,68 +619,97 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // get children "directories". e.g. if we have key "/a", "/a/b", "/ab", getting keys // with prefix "/a" will return all three, while with prefix "/a/" will return only // "/a/b" which is the correct answer. 
- if opts.Recursive && !strings.HasSuffix(preparedKey, "/") { + if recursive && !strings.HasSuffix(preparedKey, "/") { preparedKey += "/" } keyPrefix := preparedKey // set the appropriate clientv3 options to filter the returned data set var limitOption *clientv3.OpOption - limit := opts.Predicate.Limit + limit := pred.Limit var paging bool options := make([]clientv3.OpOption, 0, 4) - if opts.Predicate.Limit > 0 { + if s.pagingEnabled && pred.Limit > 0 { paging = true options = append(options, clientv3.WithLimit(limit)) limitOption = &options[len(options)-1] } - if opts.Recursive { - rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) - options = append(options, clientv3.WithRange(rangeEnd)) - } - newItemFunc := getNewItemFunc(listObj, v) - var continueRV, withRev int64 + var fromRV *uint64 + if len(resourceVersion) > 0 { + parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion) + if err != nil { + return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + } + fromRV = &parsedRV + } + + var returnedRV, continueRV, withRev int64 var continueKey string switch { - case opts.Recursive && len(opts.Predicate.Continue) > 0: - continueKey, continueRV, err = storage.DecodeContinue(opts.Predicate.Continue, keyPrefix) + case recursive && s.pagingEnabled && len(pred.Continue) > 0: + continueKey, continueRV, err = storage.DecodeContinue(pred.Continue, keyPrefix) if err != nil { return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err)) } - if len(opts.ResourceVersion) > 0 && opts.ResourceVersion != "0" { + if len(resourceVersion) > 0 && resourceVersion != "0" { return apierrors.NewBadRequest("specifying resource version is not allowed when using continue") } + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) preparedKey = continueKey + // If continueRV > 0, the LIST request needs a specific resource version. // continueRV==0 is invalid. // If continueRV < 0, the request is for the latest resource version. if continueRV > 0 { withRev = continueRV + returnedRV = continueRV } - case len(opts.ResourceVersion) > 0: - parsedRV, err := s.versioner.ParseResourceVersion(opts.ResourceVersion) - if err != nil { - return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) + case recursive && s.pagingEnabled && pred.Limit > 0: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. + case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + if *fromRV > 0 { + returnedRV = int64(*fromRV) + withRev = returnedRV + } + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) + } } - switch opts.ResourceVersionMatch { - case metav1.ResourceVersionMatchNotOlderThan: - // The not older than constraint is checked after we get a response from etcd, - // and returnedRV is then set to the revision we get from the etcd response. 
- case metav1.ResourceVersionMatchExact: - withRev = int64(parsedRV) - case "": // legacy case - if opts.Recursive && opts.Predicate.Limit > 0 && parsedRV > 0 { - withRev = int64(parsedRV) + + rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix) + options = append(options, clientv3.WithRange(rangeEnd)) + default: + if fromRV != nil { + switch match { + case metav1.ResourceVersionMatchNotOlderThan: + // The not older than constraint is checked after we get a response from etcd, + // and returnedRV is then set to the revision we get from the etcd response. + case metav1.ResourceVersionMatchExact: + returnedRV = int64(*fromRV) + withRev = returnedRV + case "": // legacy case + default: + return fmt.Errorf("unknown ResourceVersionMatch value: %v", match) } - default: - return fmt.Errorf("unknown ResourceVersionMatch value: %v", opts.ResourceVersionMatch) } - } + if recursive { + options = append(options, clientv3.WithPrefix()) + } + } if withRev != 0 { options = append(options, clientv3.WithRev(withRev)) } @@ -691,7 +728,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption }() metricsOp := "get" - if opts.Recursive { + if recursive { metricsOp = "list" } @@ -700,10 +737,10 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption getResp, err = s.client.KV.Get(ctx, preparedKey, options...) metrics.RecordEtcdRequest(metricsOp, s.groupResourceString, err, startTime) if err != nil { - return interpretListError(err, len(opts.Predicate.Continue) > 0, continueKey, keyPrefix) + return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix) } numFetched += len(getResp.Kvs) - if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil { + if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil { return err } hasMore = getResp.More @@ -711,15 +748,10 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption if len(getResp.Kvs) == 0 && getResp.More { return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") } - // indicate to the client which resource version was returned, and use the same resource version for subsequent requests. 
- if withRev == 0 { - withRev = getResp.Header.Revision - options = append(options, clientv3.WithRev(withRev)) - } // avoid small allocations for the result slice, since this can be called in many // different contexts and we don't know how significantly the result will be filtered - if opts.Predicate.Empty() { + if pred.Empty() { growSlice(v, len(getResp.Kvs)) } else { growSlice(v, 2048, len(getResp.Kvs)) @@ -727,7 +759,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // take items from the response until the bucket is full, filtering as we go for i, kv := range getResp.Kvs { - if paging && int64(v.Len()) >= opts.Predicate.Limit { + if paging && int64(v.Len()) >= pred.Limit { hasMore = true break } @@ -738,7 +770,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err) } - if err := appendListItem(v, data, uint64(kv.ModRevision), opts.Predicate, s.codec, s.versioner, newItemFunc); err != nil { + if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil { recordDecodeError(s.groupResourceString, string(kv.Key)) return err } @@ -748,12 +780,17 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption getResp.Kvs[i] = nil } + // indicate to the client which resource version was returned + if returnedRV == 0 { + returnedRV = getResp.Header.Revision + } + // no more results remain or we didn't request paging if !hasMore || !paging { break } // we're paging but we have filled our bucket - if int64(v.Len()) >= opts.Predicate.Limit { + if int64(v.Len()) >= pred.Limit { break } @@ -767,8 +804,11 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption *limitOption = clientv3.WithLimit(limit) } preparedKey = string(lastKey) + "\x00" + if withRev == 0 { + withRev = returnedRV + options = append(options, clientv3.WithRev(withRev)) + } } - if v.IsNil() { // Ensure that we never return a nil Items pointer in the result for consistency. v.Set(reflect.MakeSlice(v.Type(), 0, 0)) @@ -778,7 +818,7 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // we never return a key that the client wouldn't be allowed to see if hasMore { // we want to start immediately after the last key - next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, withRev) + next, err := storage.EncodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV) if err != nil { return err } @@ -786,15 +826,17 @@ func (s *store) GetList(ctx context.Context, key string, opts storage.ListOption // getResp.Count counts in objects that do not match the pred. // Instead of returning inaccurate count for non-empty selectors, we return nil. // Only set remainingItemCount if the predicate is empty. 
- if opts.Predicate.Empty() { - c := int64(getResp.Count - opts.Predicate.Limit) - remainingItemCount = &c + if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) { + if pred.Empty() { + c := int64(getResp.Count - pred.Limit) + remainingItemCount = &c + } } - return s.versioner.UpdateList(listObj, uint64(withRev), next, remainingItemCount) + return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount) } // no continuation - return s.versioner.UpdateList(listObj, uint64(withRev), "", nil) + return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil) } // growSlice takes a slice value and grows its capacity up @@ -829,7 +871,18 @@ func growSlice(v reflect.Value, maxCapacity int, sizes ...int) { } // Watch implements storage.Interface.Watch. +// TODO(#115478): In order to graduate the WatchList feature to beta, the etcd3 implementation must/should also support it. func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) { + // it is safe to skip SendInitialEvents if the request is backward compatible + // see https://github.com/kubernetes/kubernetes/blob/267eb25e60955fe8e438c6311412e7cf7d028acb/staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go#L260 + compatibility := opts.Predicate.AllowWatchBookmarks == false && (opts.ResourceVersion == "" || opts.ResourceVersion == "0") + if opts.SendInitialEvents != nil && !compatibility { + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: s.groupResource.Group, Kind: s.groupResource.Resource}, + "", + field.ErrorList{field.Forbidden(field.NewPath("sendInitialEvents"), "for watch is unsupported by an etcd cluster")}, + ) + } preparedKey, err := s.prepareKey(key) if err != nil { return nil, err @@ -838,7 +891,7 @@ func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) if err != nil { return nil, err } - return s.watcher.Watch(s.watchContext(ctx), preparedKey, int64(rev), opts) + return s.watcher.Watch(s.watchContext(ctx), preparedKey, int64(rev), opts.Recursive, opts.ProgressNotify, s.transformer, opts.Predicate) } func (s *store) watchContext(ctx context.Context) context.Context { @@ -852,18 +905,6 @@ func (s *store) watchContext(ctx context.Context) context.Context { return clientv3.WithRequireLeader(ctx) } -func (s *store) getCurrentState(ctx context.Context, key string, v reflect.Value, ignoreNotFound bool) func() (*objState, error) { - return func() (*objState, error) { - startTime := time.Now() - getResp, err := s.client.KV.Get(ctx, key) - metrics.RecordEtcdRequest("get", s.groupResourceString, err, startTime) - if err != nil { - return nil, err - } - return s.getState(ctx, getResp, key, v, ignoreNotFound) - } -} - func (s *store) getState(ctx context.Context, getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) { state := &objState{ meta: &storage.ResponseMeta{}, diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go index 85acf44f86b..d4929bd9d82 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go @@ -18,29 +18,27 @@ package etcd3 import ( "context" - "errors" "fmt" "os" + "reflect" "strconv" "strings" "sync" - "time" - clientv3 "go.etcd.io/etcd/client/v3" grpccodes "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/features" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/etcd3/metrics" "k8s.io/apiserver/pkg/storage/value" - utilfeature "k8s.io/apiserver/pkg/util/feature" utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol" + + clientv3 "go.etcd.io/etcd/client/v3" + "k8s.io/klog/v2" ) @@ -50,9 +48,6 @@ const ( outgoingBufSize = 100 ) -// defaultWatcherMaxLimit is used to facilitate construction tests -var defaultWatcherMaxLimit int64 = maxLimit - // fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error var fatalOnDecodeError = false @@ -68,19 +63,18 @@ func TestOnlySetFatalOnDecodeError(b bool) { } type watcher struct { - client *clientv3.Client - codec runtime.Codec - newFunc func() runtime.Object - objectType string - groupResource schema.GroupResource - versioner storage.Versioner - transformer value.Transformer - getCurrentStorageRV func(context.Context) (uint64, error) + client *clientv3.Client + codec runtime.Codec + newFunc func() runtime.Object + objectType string + groupResource schema.GroupResource + versioner storage.Versioner } // watchChan implements watch.Interface. type watchChan struct { watcher *watcher + transformer value.Transformer key string initialRev int64 recursive bool @@ -93,26 +87,35 @@ type watchChan struct { errChan chan error } +func newWatcher(client *clientv3.Client, codec runtime.Codec, groupResource schema.GroupResource, newFunc func() runtime.Object, versioner storage.Versioner) *watcher { + res := &watcher{ + client: client, + codec: codec, + groupResource: groupResource, + newFunc: newFunc, + versioner: versioner, + } + if newFunc == nil { + res.objectType = "" + } else { + res.objectType = reflect.TypeOf(newFunc()).String() + } + return res +} + // Watch watches on a key and returns a watch.Interface that transfers relevant notifications. // If rev is zero, it will return the existing object(s) and then start watching from // the maximum revision+1 from returned objects. // If rev is non-zero, it will watch events happened after given revision. -// If opts.Recursive is false, it watches on given key. -// If opts.Recursive is true, it watches any children and directories under the key, excluding the root key itself. -// pred must be non-nil. Only if opts.Predicate matches the change, it will be returned. -func (w *watcher) Watch(ctx context.Context, key string, rev int64, opts storage.ListOptions) (watch.Interface, error) { - if opts.Recursive && !strings.HasSuffix(key, "/") { +// If recursive is false, it watches on given key. +// If recursive is true, it watches any children and directories under the key, excluding the root key itself. +// pred must be non-nil. Only if pred matches the change, it will be returned. 
+func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive, progressNotify bool, transformer value.Transformer, pred storage.SelectionPredicate) (watch.Interface, error) { + if recursive && !strings.HasSuffix(key, "/") { key += "/" } - if opts.ProgressNotify && w.newFunc == nil { - return nil, apierrors.NewInternalError(errors.New("progressNotify for watch is unsupported by the etcd storage because no newFunc was provided")) - } - startWatchRV, err := w.getStartWatchResourceVersion(ctx, rev, opts) - if err != nil { - return nil, err - } - wc := w.createWatchChan(ctx, key, startWatchRV, opts.Recursive, opts.ProgressNotify, opts.Predicate) - go wc.run(isInitialEventsEndBookmarkRequired(opts), areInitialEventsRequired(rev, opts)) + wc := w.createWatchChan(ctx, key, rev, recursive, progressNotify, transformer, pred) + go wc.run() // For etcd watch we don't have an easy way to answer whether the watch // has already caught up. So in the initial version (given that watchcache @@ -124,9 +127,10 @@ func (w *watcher) Watch(ctx context.Context, key string, rev int64, opts storage return wc, nil } -func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, pred storage.SelectionPredicate) *watchChan { +func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive, progressNotify bool, transformer value.Transformer, pred storage.SelectionPredicate) *watchChan { wc := &watchChan{ watcher: w, + transformer: transformer, key: key, initialRev: rev, recursive: recursive, @@ -144,62 +148,6 @@ func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, re return wc } -// getStartWatchResourceVersion returns a ResourceVersion -// the watch will be started from. -// Depending on the input parameters the semantics of the returned ResourceVersion are: -// - start at Exact (return resourceVersion) -// - start at Most Recent (return an RV from etcd) -func (w *watcher) getStartWatchResourceVersion(ctx context.Context, resourceVersion int64, opts storage.ListOptions) (int64, error) { - if resourceVersion > 0 { - return resourceVersion, nil - } - if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { - return 0, nil - } - if opts.SendInitialEvents == nil || *opts.SendInitialEvents { - // note that when opts.SendInitialEvents=true - // we will be issuing a consistent LIST request - // against etcd followed by the special bookmark event - return 0, nil - } - // at this point the clients is interested - // only in getting a stream of events - // starting at the MostRecent point in time (RV) - currentStorageRV, err := w.getCurrentStorageRV(ctx) - if err != nil { - return 0, err - } - // currentStorageRV is taken from resp.Header.Revision (int64) - // and cast to uint64, so it is safe to do reverse - // at some point we should unify the interface but that - // would require changing Versioner.UpdateList - return int64(currentStorageRV), nil -} - -// isInitialEventsEndBookmarkRequired since there is no way to directly set -// opts.ProgressNotify from the API and the etcd3 impl doesn't support -// notification for external clients we simply return initialEventsEndBookmarkRequired -// to only send the bookmark event after the initial list call. 
-// -// see: https://github.com/kubernetes/kubernetes/issues/120348 -func isInitialEventsEndBookmarkRequired(opts storage.ListOptions) bool { - if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { - return false - } - return opts.SendInitialEvents != nil && *opts.SendInitialEvents && opts.Predicate.AllowWatchBookmarks -} - -// areInitialEventsRequired returns true if all events from the etcd should be returned. -func areInitialEventsRequired(resourceVersion int64, opts storage.ListOptions) bool { - if opts.SendInitialEvents == nil && resourceVersion == 0 { - return true // legacy case - } - if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) { - return false - } - return opts.SendInitialEvents != nil && *opts.SendInitialEvents -} - type etcdError interface { Code() grpccodes.Code Error() string @@ -225,9 +173,9 @@ func isCancelError(err error) bool { return false } -func (wc *watchChan) run(initialEventsEndBookmarkRequired, forceInitialEvents bool) { +func (wc *watchChan) run() { watchClosedCh := make(chan struct{}) - go wc.startWatching(watchClosedCh, initialEventsEndBookmarkRequired, forceInitialEvents) + go wc.startWatching(watchClosedCh) var resultChanWG sync.WaitGroup resultChanWG.Add(1) @@ -277,58 +225,17 @@ func (wc *watchChan) RequestWatchProgress() error { func (wc *watchChan) sync() error { opts := []clientv3.OpOption{} if wc.recursive { - opts = append(opts, clientv3.WithLimit(defaultWatcherMaxLimit)) - rangeEnd := clientv3.GetPrefixRangeEnd(wc.key) - opts = append(opts, clientv3.WithRange(rangeEnd)) + opts = append(opts, clientv3.WithPrefix()) } - - var err error - var lastKey []byte - var withRev int64 - var getResp *clientv3.GetResponse - - metricsOp := "get" - if wc.recursive { - metricsOp = "list" + getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...) + if err != nil { + return err } - - preparedKey := wc.key - - for { - startTime := time.Now() - getResp, err = wc.watcher.client.KV.Get(wc.ctx, preparedKey, opts...) - metrics.RecordEtcdRequest(metricsOp, wc.watcher.groupResource.String(), err, startTime) - if err != nil { - return interpretListError(err, true, preparedKey, wc.key) - } - - if len(getResp.Kvs) == 0 && getResp.More { - return fmt.Errorf("no results were found, but etcd indicated there were more values remaining") - } - - // send items from the response until no more results - for i, kv := range getResp.Kvs { - lastKey = kv.Key - wc.sendEvent(parseKV(kv)) - // free kv early. Long lists can take O(seconds) to decode. - getResp.Kvs[i] = nil - } - - if withRev == 0 { - wc.initialRev = getResp.Header.Revision - } - - // no more results remain - if !getResp.More { - return nil - } - - preparedKey = string(lastKey) + "\x00" - if withRev == 0 { - withRev = getResp.Header.Revision - opts = append(opts, clientv3.WithRev(withRev)) - } + wc.initialRev = getResp.Header.Revision + for _, kv := range getResp.Kvs { + wc.sendEvent(parseKV(kv)) } + return nil } func logWatchChannelErr(err error) { @@ -346,44 +253,14 @@ func logWatchChannelErr(err error) { // startWatching does: // - get current objects if initialRev=0; set initialRev to current rev // - watch on given key and send events to process. -// -// initialEventsEndBookmarkSent helps us keep track -// of whether we have sent an annotated bookmark event. -// -// it's important to note that we don't -// need to track the actual RV because -// we only send the bookmark event -// after the initial list call. 
-// -// when this variable is set to false, -// it means we don't have any specific -// preferences for delivering bookmark events. -func (wc *watchChan) startWatching(watchClosedCh chan struct{}, initialEventsEndBookmarkRequired, forceInitialEvents bool) { - if wc.initialRev > 0 && forceInitialEvents { - currentStorageRV, err := wc.watcher.getCurrentStorageRV(wc.ctx) - if err != nil { - wc.sendError(err) - return - } - if uint64(wc.initialRev) > currentStorageRV { - wc.sendError(storage.NewTooLargeResourceVersionError(uint64(wc.initialRev), currentStorageRV, int(wait.Jitter(1*time.Second, 3).Seconds()))) - return - } - } - if forceInitialEvents { +func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { + if wc.initialRev == 0 { if err := wc.sync(); err != nil { klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } } - if initialEventsEndBookmarkRequired { - wc.sendEvent(func() *event { - e := progressNotifyEvent(wc.initialRev) - e.isInitialEventsEndBookmark = true - return e - }()) - } opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} if wc.recursive { opts = append(opts, clientv3.WithPrefix()) @@ -475,17 +352,14 @@ func (wc *watchChan) transform(e *event) (res *watch.Event) { switch { case e.isProgressNotify: + if wc.watcher.newFunc == nil { + return nil + } object := wc.watcher.newFunc() if err := wc.watcher.versioner.UpdateObject(object, uint64(e.rev)); err != nil { klog.Errorf("failed to propagate object version: %v", err) return nil } - if e.isInitialEventsEndBookmark { - if err := storage.AnnotateInitialEventsEndBookmark(object); err != nil { - wc.sendError(fmt.Errorf("error while accessing object's metadata gr: %v, type: %v, obj: %#v, err: %v", wc.watcher.groupResource, wc.watcher.objectType, object, err)) - return nil - } - } res = &watch.Event{ Type: watch.Bookmark, Object: object, @@ -573,7 +447,7 @@ func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtim } if !e.isDeleted { - data, _, err := wc.watcher.transformer.TransformFromStorage(wc.ctx, e.value, authenticatedDataString(e.key)) + data, _, err := wc.transformer.TransformFromStorage(wc.ctx, e.value, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } @@ -588,7 +462,7 @@ func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtim // we need the object only to compute whether it was filtered out // before). if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { - data, _, err := wc.watcher.transformer.TransformFromStorage(wc.ctx, e.prevValue, authenticatedDataString(e.key)) + data, _, err := wc.transformer.TransformFromStorage(wc.ctx, e.prevValue, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go index 5489660809d..76123fde864 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/storage/interfaces.go @@ -282,19 +282,6 @@ type ListOptions struct { Recursive bool // ProgressNotify determines whether storage-originated bookmark (progress notify) events should // be delivered to the users. The option is ignored for non-watch requests. - // - // Firstly, note that this field is different from the Predicate.AllowWatchBookmarks field. - // Secondly, this field is intended for internal clients only such as the watch cache. 
- // - // This means that external clients do not have the ability to set this field directly. - // For example by setting the allowWatchBookmarks query parameter. - // - // The motivation for this approach is the fact that the frequency - // of bookmark events from a storage like etcd might be very high. - // As the number of watch requests increases, the server load would also increase. - // - // Furthermore, the server is not obligated to provide bookmark events at all, - // as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/956-watch-bookmark#proposal ProgressNotify bool // SendInitialEvents, when set together with Watch option, // begin the watch stream with synthetic init events to build the diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go index 93b1e707f66..47534c97818 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go @@ -62,6 +62,11 @@ type Config struct { Prefix string // Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to. Transport TransportConfig + // Paging indicates whether the server implementation should allow paging (if it is + // supported). This is generally configured by feature gating, or by a specific + // resource type not wishing to allow paging, and is not intended for end users to + // set. + Paging bool Codec runtime.Codec // EncodeVersioner is the same groupVersioner used to build the @@ -110,6 +115,7 @@ func (config *Config) ForResource(resource schema.GroupResource) *ConfigForResou func NewDefaultConfig(prefix string, codec runtime.Codec) *Config { return &Config{ + Paging: true, Prefix: prefix, Codec: codec, CompactionInterval: DefaultCompactInterval, diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go index 2aab5c76d21..94cb3710bed 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go @@ -419,7 +419,7 @@ func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration }, nil } -func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, DestroyFunc, error) { +func newETCD3Storage(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval) if err != nil { return nil, nil, err @@ -454,7 +454,7 @@ func newETCD3Storage(c storagebackend.ConfigForResource, newFunc, newListFunc fu if transformer == nil { transformer = identity.NewEncryptCheckTransformer() } - return etcd3.New(client, c.Codec, newFunc, newListFunc, c.Prefix, resourcePrefix, c.GroupResource, transformer, c.LeaseManagerConfig), destroyFunc, nil + return etcd3.New(client, c.Codec, newFunc, c.Prefix, c.GroupResource, transformer, c.Paging, c.LeaseManagerConfig), destroyFunc, nil } // startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the diff --git a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go index 2bf3727e8a7..1a60c92902c 100644 --- 
a/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go +++ b/vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go @@ -30,12 +30,12 @@ import ( type DestroyFunc func() // Create creates a storage backend based on given config. -func Create(c storagebackend.ConfigForResource, newFunc, newListFunc func() runtime.Object, resourcePrefix string) (storage.Interface, DestroyFunc, error) { +func Create(c storagebackend.ConfigForResource, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) { switch c.Type { case storagebackend.StorageTypeETCD2: return nil, nil, fmt.Errorf("%s is no longer a supported storage backend", c.Type) case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3: - return newETCD3Storage(c, newFunc, newListFunc, resourcePrefix) + return newETCD3Storage(c, newFunc) default: return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/util.go b/vendor/k8s.io/apiserver/pkg/storage/util.go index 6d5fb36d24e..9da8d9713c1 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/util.go +++ b/vendor/k8s.io/apiserver/pkg/storage/util.go @@ -17,25 +17,14 @@ limitations under the License. package storage import ( - "context" "fmt" - "strconv" "sync/atomic" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/validation/path" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" ) -const ( - // initialEventsAnnotationKey the name of the key - // under which an annotation marking the end of list stream - // is kept. - initialEventsAnnotationKey = "k8s.io/initial-events-end" -) - type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) // SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc @@ -90,72 +79,3 @@ func (hwm *HighWaterMark) Update(current int64) bool { } } } - -// GetCurrentResourceVersionFromStorage gets the current resource version from the underlying storage engine. -// This method issues an empty list request and reads only the ResourceVersion from the object metadata -func GetCurrentResourceVersionFromStorage(ctx context.Context, storage Interface, newListFunc func() runtime.Object, resourcePrefix, objectType string) (uint64, error) { - if storage == nil { - return 0, fmt.Errorf("storage wasn't provided for %s", objectType) - } - if newListFunc == nil { - return 0, fmt.Errorf("newListFunction wasn't provided for %s", objectType) - } - emptyList := newListFunc() - pred := SelectionPredicate{ - Label: labels.Everything(), - Field: fields.Everything(), - Limit: 1, // just in case we actually hit something - } - - err := storage.GetList(ctx, resourcePrefix, ListOptions{Predicate: pred}, emptyList) - if err != nil { - return 0, err - } - emptyListAccessor, err := meta.ListAccessor(emptyList) - if err != nil { - return 0, err - } - if emptyListAccessor == nil { - return 0, fmt.Errorf("unable to extract a list accessor from %T", emptyList) - } - - currentResourceVersion, err := strconv.Atoi(emptyListAccessor.GetResourceVersion()) - if err != nil { - return 0, err - } - - if currentResourceVersion == 0 { - return 0, fmt.Errorf("the current resource version must be greater than 0") - } - return uint64(currentResourceVersion), nil -} - -// AnnotateInitialEventsEndBookmark adds a special annotation to the given object -// which indicates that the initial events have been sent. -// -// Note that this function assumes that the obj's annotation -// field is a reference type (i.e. a map). 
-func AnnotateInitialEventsEndBookmark(obj runtime.Object) error { - objMeta, err := meta.Accessor(obj) - if err != nil { - return err - } - objAnnotations := objMeta.GetAnnotations() - if objAnnotations == nil { - objAnnotations = map[string]string{} - } - objAnnotations[initialEventsAnnotationKey] = "true" - objMeta.SetAnnotations(objAnnotations) - return nil -} - -// HasInitialEventsEndBookmarkAnnotation checks the presence of the -// special annotation which marks that the initial events have been sent. -func HasInitialEventsEndBookmarkAnnotation(obj runtime.Object) (bool, error) { - objMeta, err := meta.Accessor(obj) - if err != nil { - return false, err - } - objAnnotations := objMeta.GetAnnotations() - return objAnnotations[initialEventsAnnotationKey] == "true", nil -} diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go index bc7f04b9c6b..c677f54b5ba 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/cache.go @@ -26,7 +26,6 @@ import ( utilcache "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apiserver/pkg/storage/value" - "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" "k8s.io/utils/clock" ) @@ -39,13 +38,10 @@ type simpleCache struct { ttl time.Duration // hashPool is a per cache pool of hash.Hash (to avoid allocations from building the Hash) // SHA-256 is used to prevent collisions - hashPool *sync.Pool - providerName string - mu sync.Mutex // guards call to set - recordCacheSize func(providerName string, size int) // for unit tests + hashPool *sync.Pool } -func newSimpleCache(clock clock.Clock, ttl time.Duration, providerName string) *simpleCache { +func newSimpleCache(clock clock.Clock, ttl time.Duration) *simpleCache { cache := utilcache.NewExpiringWithClock(clock) cache.AllowExpiredGet = true // for a given key, the value (the decryptTransformer) is always the same return &simpleCache{ @@ -56,8 +52,6 @@ func newSimpleCache(clock clock.Clock, ttl time.Duration, providerName string) * return sha256.New() }, }, - providerName: providerName, - recordCacheSize: metrics.RecordDekSourceCacheSize, } } @@ -72,8 +66,6 @@ func (c *simpleCache) get(key []byte) value.Read { // set caches the record for the key func (c *simpleCache) set(key []byte, transformer value.Read) { - c.mu.Lock() - defer c.mu.Unlock() if len(key) == 0 { panic("key must not be empty") } @@ -81,8 +73,6 @@ func (c *simpleCache) set(key []byte, transformer value.Read) { panic("transformer must not be nil") } c.cache.Set(c.keyFunc(key), transformer, c.ttl) - // Add metrics for cache size - c.recordCacheSize(c.providerName, c.cache.Len()) } // keyFunc generates a string key by hashing the inputs. 
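For readers skimming the vendored kmsv2 `cache.go` changes above: the DEK cache keys its entries by a SHA-256 digest of the encrypted DEK, reusing hashers from a `sync.Pool` to avoid per-call allocations. The sketch below is a minimal, self-contained illustration of that keying pattern only; the names (`dekCache`, `entry`), the map-plus-TTL cache, and the placeholder values are assumptions for illustration, not the vendored implementation (which uses `utilcache.Expiring` and stores transformers rather than strings).

```go
// Minimal sketch of the DEK-cache keying pattern from the cache.go hunk above.
// The expiring-cache behaviour is stubbed with a plain map plus timestamps
// purely for illustration.
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"sync"
	"time"
)

type entry struct {
	value   string
	expires time.Time
}

type dekCache struct {
	mu       sync.Mutex
	items    map[string]entry
	ttl      time.Duration
	hashPool *sync.Pool // pool of sha256 hashers, mirroring simpleCache.hashPool
}

func newDekCache(ttl time.Duration) *dekCache {
	return &dekCache{
		items: map[string]entry{},
		ttl:   ttl,
		hashPool: &sync.Pool{
			New: func() interface{} { return sha256.New() },
		},
	}
}

// keyFunc hashes the encrypted DEK so the (potentially large) ciphertext is
// not used directly as a map key.
func (c *dekCache) keyFunc(key []byte) string {
	h := c.hashPool.Get().(hash.Hash)
	defer c.hashPool.Put(h)
	h.Reset()
	h.Write(key)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func (c *dekCache) set(key []byte, value string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[c.keyFunc(key)] = entry{value: value, expires: time.Now().Add(c.ttl)}
}

func (c *dekCache) get(key []byte) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, ok := c.items[c.keyFunc(key)]
	if !ok || time.Now().After(e.expires) {
		return "", false
	}
	return e.value, true
}

func main() {
	cache := newDekCache(time.Minute)
	encryptedDEK := []byte("ciphertext-of-dek") // placeholder value
	cache.set(encryptedDEK, "decrypted-transformer")
	if v, ok := cache.get(encryptedDEK); ok {
		fmt.Println("cache hit:", v)
	}
}
```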
diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go index a20b10fc3cb..45d5db58b75 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/envelope.go @@ -28,7 +28,6 @@ import ( "unsafe" "github.com/gogo/protobuf/proto" - "go.opentelemetry.io/otel/attribute" "golang.org/x/crypto/cryptobyte" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -40,22 +39,21 @@ import ( aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes" kmstypes "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2" "k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics" - "k8s.io/component-base/tracing" "k8s.io/klog/v2" kmsservice "k8s.io/kms/pkg/service" "k8s.io/utils/clock" ) +// TODO integration test with old AES GCM data recorded and new KDF data recorded + func init() { value.RegisterMetrics() metrics.RegisterMetrics() } const ( - // KMSAPIVersionv2 is a version of the KMS API. - KMSAPIVersionv2 = "v2" - // KMSAPIVersionv2beta1 is a version of the KMS API. - KMSAPIVersionv2beta1 = "v2beta1" + // KMSAPIVersion is the version of the KMS API. + KMSAPIVersion = "v2beta1" // annotationsMaxSize is the maximum size of the annotations. annotationsMaxSize = 32 * 1024 // 32 kB // KeyIDMaxSize is the maximum size of the keyID. @@ -114,51 +112,32 @@ type envelopeTransformer struct { stateFunc StateFunc // cache is a thread-safe expiring lru cache which caches decrypted DEKs indexed by their encrypted form. - cache *simpleCache - apiServerID string + cache *simpleCache } // NewEnvelopeTransformer returns a transformer which implements a KEK-DEK based envelope encryption scheme. // It uses envelopeService to encrypt and decrypt DEKs. Respective DEKs (in encrypted form) are prepended to // the data items they encrypt. -func NewEnvelopeTransformer(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, apiServerID string) value.Transformer { - return newEnvelopeTransformerWithClock(envelopeService, providerName, stateFunc, apiServerID, cacheTTL, clock.RealClock{}) +func NewEnvelopeTransformer(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc) value.Transformer { + return newEnvelopeTransformerWithClock(envelopeService, providerName, stateFunc, cacheTTL, clock.RealClock{}) } -func newEnvelopeTransformerWithClock(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, apiServerID string, cacheTTL time.Duration, clock clock.Clock) value.Transformer { +func newEnvelopeTransformerWithClock(envelopeService kmsservice.Service, providerName string, stateFunc StateFunc, cacheTTL time.Duration, clock clock.Clock) value.Transformer { return &envelopeTransformer{ envelopeService: envelopeService, providerName: providerName, stateFunc: stateFunc, - cache: newSimpleCache(clock, cacheTTL, providerName), - apiServerID: apiServerID, + cache: newSimpleCache(clock, cacheTTL), } } // TransformFromStorage decrypts data encrypted by this transformer using envelope encryption. 
func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, bool, error) { - ctx, span := tracing.Start(ctx, "TransformFromStorage with envelopeTransformer", - attribute.String("transformer.provider.name", t.providerName), - // The service.instance_id of the apiserver is already available in the trace - /* - { - "key": "service.instance.id", - "type": "string", - "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" - } - */ - ) - defer span.End(500 * time.Millisecond) - - span.AddEvent("About to decode encrypted object") // Deserialize the EncryptedObject from the data. encryptedObject, err := t.doDecode(data) if err != nil { - span.AddEvent("Decoding encrypted object failed") - span.RecordError(err) return nil, false, err } - span.AddEvent("Decoded encrypted object") useSeed := encryptedObject.EncryptedDEKSourceType == kmstypes.EncryptedDEKSourceType_HKDF_SHA256_XNONCE_AES_GCM_SEED @@ -179,7 +158,6 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // fallback to the envelope service if we do not have the transformer locally if transformer == nil { - span.AddEvent("About to decrypt DEK using remote service") value.RecordCacheMiss() requestInfo := getRequestInfoFromContext(ctx) @@ -194,28 +172,21 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b Annotations: encryptedObject.Annotations, }) if err != nil { - span.AddEvent("DEK decryption failed") - span.RecordError(err) return nil, false, fmt.Errorf("failed to decrypt DEK, error: %w", err) } - span.AddEvent("DEK decryption succeeded") transformer, err = t.addTransformerForDecryption(encryptedObjectCacheKey, key, useSeed) if err != nil { return nil, false, err } } - metrics.RecordKeyID(metrics.FromStorageLabel, t.providerName, encryptedObject.KeyID, t.apiServerID) + metrics.RecordKeyID(metrics.FromStorageLabel, t.providerName, encryptedObject.KeyID) - span.AddEvent("About to decrypt data using DEK") out, stale, err := transformer.TransformFromStorage(ctx, encryptedObject.EncryptedData, dataCtx) if err != nil { - span.AddEvent("Data decryption failed") - span.RecordError(err) return nil, false, err } - span.AddEvent("Data decryption succeeded") // data is considered stale if the key ID does not match our current write transformer return out, stale || @@ -226,19 +197,6 @@ func (t *envelopeTransformer) TransformFromStorage(ctx context.Context, data []b // TransformToStorage encrypts data to be written to disk using envelope encryption. func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byte, dataCtx value.Context) ([]byte, error) { - ctx, span := tracing.Start(ctx, "TransformToStorage with envelopeTransformer", - attribute.String("transformer.provider.name", t.providerName), - // The service.instance_id of the apiserver is already available in the trace - /* - { - "key": "service.instance.id", - "type": "string", - "value": "apiserver-zsteyir5lyrtdcmqqmd5kzze6m" - } - */ - ) - defer span.End(500 * time.Millisecond) - state, err := t.stateFunc() if err != nil { return nil, err @@ -250,6 +208,7 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt // this prevents a cache miss every time the DEK rotates // this has the side benefit of causing the cache to perform a GC // TODO see if we can do this inside the stateFunc control loop + // TODO(aramase): Add metrics for cache size. 
t.cache.set(state.CacheKey, state.Transformer) requestInfo := getRequestInfoFromContext(ctx) @@ -257,31 +216,18 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt "group", requestInfo.APIGroup, "version", requestInfo.APIVersion, "resource", requestInfo.Resource, "subresource", requestInfo.Subresource, "verb", requestInfo.Verb, "namespace", requestInfo.Namespace, "name", requestInfo.Name) - span.AddEvent("About to encrypt data using DEK") result, err := state.Transformer.TransformToStorage(ctx, data, dataCtx) if err != nil { - span.AddEvent("Data encryption failed") - span.RecordError(err) return nil, err } - span.AddEvent("Data encryption succeeded") - metrics.RecordKeyID(metrics.ToStorageLabel, t.providerName, state.EncryptedObject.KeyID, t.apiServerID) + metrics.RecordKeyID(metrics.ToStorageLabel, t.providerName, state.EncryptedObject.KeyID) encObjectCopy := state.EncryptedObject encObjectCopy.EncryptedData = result - span.AddEvent("About to encode encrypted object") // Serialize the EncryptedObject to a byte array. - out, err := t.doEncode(&encObjectCopy) - if err != nil { - span.AddEvent("Encoding encrypted object failed") - span.RecordError(err) - return nil, err - } - span.AddEvent("Encoded encrypted object") - - return out, nil + return t.doEncode(&encObjectCopy) } // addTransformerForDecryption inserts a new transformer to the Envelope cache of DEKs for future reads. @@ -304,6 +250,7 @@ func (t *envelopeTransformer) addTransformerForDecryption(cacheKey []byte, key [ if err != nil { return nil, err } + // TODO(aramase): Add metrics for cache size. t.cache.set(cacheKey, transformer) return transformer, nil } diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go index 25150288f6e..811c8f67d25 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.pb.go @@ -71,20 +71,11 @@ type EncryptedObject struct { // EncryptedData is the encrypted data. EncryptedData []byte `protobuf:"bytes,1,opt,name=encryptedData,proto3" json:"encryptedData,omitempty"` // KeyID is the KMS key ID used for encryption operations. - // keyID must satisfy the following constraints: - // 1. The keyID is not empty. - // 2. The size of keyID is less than 1 kB. KeyID string `protobuf:"bytes,2,opt,name=keyID,proto3" json:"keyID,omitempty"` // EncryptedDEKSource is the ciphertext of the source of the DEK used to encrypt the data stored in encryptedData. // encryptedDEKSourceType defines the process of using the plaintext of this field to determine the aforementioned DEK. - // encryptedDEKSource must satisfy the following constraints: - // 1. The encrypted DEK source is not empty. - // 2. The size of encrypted DEK source is less than 1 kB. EncryptedDEKSource []byte `protobuf:"bytes,3,opt,name=encryptedDEKSource,proto3" json:"encryptedDEKSource,omitempty"` // Annotations is additional metadata that was provided by the KMS plugin. - // Annotations must satisfy the following constraints: - // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). - // 2. The size of annotations keys + values is less than 32 kB. 
Annotations map[string][]byte `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // encryptedDEKSourceType defines the process of using the plaintext of encryptedDEKSource to determine the DEK. EncryptedDEKSourceType EncryptedDEKSourceType `protobuf:"varint,5,opt,name=encryptedDEKSourceType,proto3,enum=v2.EncryptedDEKSourceType" json:"encryptedDEKSourceType,omitempty"` diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto index 674b3f4a9ae..ec1eb2680c8 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/kmsv2/v2/api.proto @@ -26,22 +26,13 @@ message EncryptedObject { bytes encryptedData = 1; // KeyID is the KMS key ID used for encryption operations. - // keyID must satisfy the following constraints: - // 1. The keyID is not empty. - // 2. The size of keyID is less than 1 kB. string keyID = 2; // EncryptedDEKSource is the ciphertext of the source of the DEK used to encrypt the data stored in encryptedData. // encryptedDEKSourceType defines the process of using the plaintext of this field to determine the aforementioned DEK. - // encryptedDEKSource must satisfy the following constraints: - // 1. The encrypted DEK source is not empty. - // 2. The size of encrypted DEK source is less than 1 kB. bytes encryptedDEKSource = 3; // Annotations is additional metadata that was provided by the KMS plugin. - // Annotations must satisfy the following constraints: - // 1. Annotation key must be a fully qualified domain name that conforms to the definition in DNS (RFC 1123). - // 2. The size of annotations keys + values is less than 32 kB. map annotations = 4; // encryptedDEKSourceType defines the process of using the plaintext of encryptedDEKSource to determine the DEK. diff --git a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go index 63723648c97..ff3903805d6 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/metrics/metrics.go @@ -44,7 +44,6 @@ type metricLabels struct { transformationType string providerName string keyIDHash string - apiServerIDHash string } /* @@ -108,21 +107,21 @@ var ( // keyIDHashTotal is the number of times a keyID is used // e.g. apiserver_envelope_encryption_key_id_hash_total counter - // apiserver_envelope_encryption_key_id_hash_total{apiserver_id_hash="sha256",key_id_hash="sha256", + // apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256", // provider_name="providerName",transformation_type="from_storage"} 1 KeyIDHashTotal = metrics.NewCounterVec( &metrics.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "key_id_hash_total", - Help: "Number of times a keyID is used split by transformation type, provider, and apiserver identity.", + Help: "Number of times a keyID is used split by transformation type and provider.", StabilityLevel: metrics.ALPHA, }, - []string{"transformation_type", "provider_name", "key_id_hash", "apiserver_id_hash"}, + []string{"transformation_type", "provider_name", "key_id_hash"}, ) // keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used - // e.g. 
apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{apiserver_id_hash="sha256",key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09 + // e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09 KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec( &metrics.GaugeOpts{ Namespace: namespace, @@ -131,11 +130,11 @@ var ( Help: "The last time in seconds when a keyID was used.", StabilityLevel: metrics.ALPHA, }, - []string{"transformation_type", "provider_name", "key_id_hash", "apiserver_id_hash"}, + []string{"transformation_type", "provider_name", "key_id_hash"}, ) // keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call. - // e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{apiserver_id_hash="sha256",key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09 + // e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09 KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec( &metrics.GaugeOpts{ Namespace: namespace, @@ -144,7 +143,7 @@ var ( Help: "The last time in seconds when a keyID was returned by the Status RPC call.", StabilityLevel: metrics.ALPHA, }, - []string{"provider_name", "key_id_hash", "apiserver_id_hash"}, + []string{"provider_name", "key_id_hash"}, ) InvalidKeyIDFromStatusTotal = metrics.NewCounterVec( @@ -157,17 +156,6 @@ var ( }, []string{"provider_name", "error"}, ) - - DekSourceCacheSize = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: "dek_source_cache_size", - Help: "Number of records in data encryption key (DEK) source cache. 
On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.", - StabilityLevel: metrics.ALPHA, - }, - []string{"provider_name"}, - ) ) var registerMetricsFunc sync.Once @@ -183,19 +171,19 @@ func registerLRUMetrics() { keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) { item := key.(metricLabels) - if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { + if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted { klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType, - "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) + "providerName", item.providerName, "keyIDHash", item.keyIDHash) } - if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { + if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted { klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType, - "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) + "providerName", item.providerName, "keyIDHash", item.keyIDHash) } }) keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) { item := key.(metricLabels) - if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash, item.apiServerIDHash); deleted { - klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash, "apiServerIDHash", item.apiServerIDHash) + if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted { + klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash) } }) } @@ -209,7 +197,6 @@ func RegisterMetrics() { } legacyregistry.MustRegister(dekCacheFillPercent) legacyregistry.MustRegister(dekCacheInterArrivals) - legacyregistry.MustRegister(DekSourceCacheSize) legacyregistry.MustRegister(KeyIDHashTotal) legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds) legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds) @@ -219,22 +206,22 @@ func RegisterMetrics() { } // RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations -func RecordKeyID(transformationType, providerName, keyID, apiServerID string) { +func RecordKeyID(transformationType, providerName, keyID string) { lockRecordKeyID.Lock() defer lockRecordKeyID.Unlock() - keyIDHash, apiServerIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID, apiServerID) - KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash, apiServerIDHash).Inc() - KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash, apiServerIDHash).SetToCurrentTime() + keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID) + KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc() + 
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime() } // RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call. -func RecordKeyIDFromStatus(providerName, keyID, apiServerID string) { +func RecordKeyIDFromStatus(providerName, keyID string) { lockRecordKeyIDStatus.Lock() defer lockRecordKeyIDStatus.Unlock() - keyIDHash, apiServerIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID, apiServerID) - KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash, apiServerIDHash).SetToCurrentTime() + keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID) + KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime() } func RecordInvalidKeyIDFromStatus(providerName, errCode string) { @@ -268,10 +255,6 @@ func RecordDekCacheFillPercent(percent float64) { dekCacheFillPercent.Set(percent) } -func RecordDekSourceCacheSize(providerName string, size int) { - DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size)) -} - // RecordKMSOperationLatency records the latency of KMS operation. func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) { KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds()) @@ -298,25 +281,24 @@ func getErrorCode(err error) string { } func getHash(data string) string { - if len(data) == 0 { - return "" - } h := hashPool.Get().(hash.Hash) h.Reset() h.Write([]byte(data)) - dataHash := fmt.Sprintf("sha256:%x", h.Sum(nil)) + result := fmt.Sprintf("sha256:%x", h.Sum(nil)) hashPool.Put(h) - return dataHash + return result } -func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID, apiServerID string) (string, string) { - keyIDHash := getHash(keyID) - apiServerIDHash := getHash(apiServerID) +func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string { + keyIDHash := "" + // only get hash if the keyID is not empty + if len(keyID) > 0 { + keyIDHash = getHash(keyID) + } c.Add(metricLabels{ transformationType: transformationType, providerName: providerName, keyIDHash: keyIDHash, - apiServerIDHash: apiServerIDHash, }, nil) // value is irrelevant, this is a set and not a map - return keyIDHash, apiServerIDHash + return keyIDHash } diff --git a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go index 9a6b2a28ef1..ffc1a0e4062 100644 --- a/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go +++ b/vendor/k8s.io/apiserver/pkg/util/apihelpers/helpers.go @@ -19,7 +19,7 @@ package apihelpers import ( "sort" - flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" ) // SetFlowSchemaCondition sets conditions. 
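The envelope `metrics.go` hunks earlier in this diff expose KMS key IDs only as `sha256:<hex>` label values, with an empty key ID kept as an empty label (the guard sits inside `getHash` on one side of the diff and inside `addLabelToCache` on the other). Below is a minimal sketch of that hashing pattern under those assumptions; everything outside `getHash` and `hashPool` is illustrative.

```go
// Sketch of the key-ID hash label pattern: key IDs are never exported
// verbatim, only a "sha256:<hex>" digest, and empty key IDs map to an
// empty label.
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"sync"
)

var hashPool = &sync.Pool{
	New: func() interface{} { return sha256.New() },
}

// getHash returns "sha256:<hex>" for non-empty input and "" otherwise,
// matching the empty-input guard visible in the hunks.
func getHash(data string) string {
	if len(data) == 0 {
		return ""
	}
	h := hashPool.Get().(hash.Hash)
	h.Reset()
	h.Write([]byte(data))
	dataHash := fmt.Sprintf("sha256:%x", h.Sum(nil))
	hashPool.Put(h)
	return dataHash
}

func main() {
	fmt.Println(getHash("my-kms-key-id")) // sha256:... digest used as the metric label
	fmt.Println(getHash("") == "")        // true: empty key IDs are not hashed
}
```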
diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index d40cae509d2..8c90811bf45 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -50,10 +50,10 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1" - flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" - flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrolapplyconfiguration "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" + flowcontrollister "k8s.io/client-go/listers/flowcontrol/v1beta3" ) const timeFmt = "2006-01-02T15:04:05.999" @@ -143,7 +143,7 @@ type configController struct { fsLister flowcontrollister.FlowSchemaLister fsInformerSynced cache.InformerSynced - flowcontrolClient flowcontrolclient.FlowcontrolV1Interface + flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface // serverConcurrencyLimit is the limit on the server's total // number of non-exempt requests being served at once. This comes @@ -295,7 +295,7 @@ func newTestableController(config TestableConfig) *configController { cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") // ensure the data structure reflects the mandatory config cfgCtlr.lockAndDigestConfigObjects(nil, nil) - fci := config.InformerFactory.Flowcontrol().V1() + fci := config.InformerFactory.Flowcontrol().V1beta3() pli := fci.PriorityLevelConfigurations() fsi := fci.FlowSchemas() cfgCtlr.plLister = pli.Lister() @@ -702,7 +702,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi state.quiescing = false } nominalConcurrencyShares, _, _ := plSpecCommons(state.pl) - meal.shareSum += float64(*nominalConcurrencyShares) + meal.shareSum += float64(nominalConcurrencyShares) meal.haveExemptPL = meal.haveExemptPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameExempt meal.haveCatchAllPL = meal.haveCatchAllPL || pl.Name == flowcontrol.PriorityLevelConfigurationNameCatchAll } @@ -807,7 +807,7 @@ func (meal *cfgMeal) processOldPLsLocked() { // allocation determined by all the share values in the // regular way. nominalConcurrencyShares, _, _ := plSpecCommons(plState.pl) - meal.shareSum += float64(*nominalConcurrencyShares) + meal.shareSum += float64(nominalConcurrencyShares) meal.haveExemptPL = meal.haveExemptPL || plName == flowcontrol.PriorityLevelConfigurationNameExempt meal.haveCatchAllPL = meal.haveCatchAllPL || plName == flowcontrol.PriorityLevelConfigurationNameCatchAll meal.newPLStates[plName] = plState @@ -823,7 +823,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // The use of math.Ceil here means that the results might sum // to a little more than serverConcurrencyLimit but the // difference will be negligible. 
- concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(*nominalConcurrencyShares) / meal.shareSum)) + concurrencyLimit := int(math.Ceil(float64(meal.cfgCtlr.serverConcurrencyLimit) * float64(nominalConcurrencyShares) / meal.shareSum)) var lendableCL, borrowingCL int if lendablePercent != nil { lendableCL = int(math.Round(float64(concurrencyLimit) * float64(*lendablePercent) / 100)) @@ -974,7 +974,7 @@ func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) { seatDemandRatioedGauge: seatDemandRatioedGauge, } nominalConcurrencyShares, _, _ := plSpecCommons(proto) - meal.shareSum += float64(*nominalConcurrencyShares) + meal.shareSum += float64(nominalConcurrencyShares) } // startRequest classifies and, if appropriate, enqueues the request. @@ -1112,7 +1112,7 @@ func relDiff(x, y float64) float64 { } // plSpecCommons returns the (NominalConcurrencyShares, LendablePercent, BorrowingLimitPercent) of the given priority level config -func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (*int32, *int32, *int32) { +func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (int32, *int32, *int32) { if limiter := pl.Spec.Limited; limiter != nil { return limiter.NominalConcurrencyShares, limiter.LendablePercent, limiter.BorrowingLimitPercent } @@ -1121,5 +1121,5 @@ func plSpecCommons(pl *flowcontrol.PriorityLevelConfiguration) (*int32, *int32, if limiter.NominalConcurrencyShares != nil { nominalConcurrencyShares = *limiter.NominalConcurrencyShares } - return &nominalConcurrencyShares, limiter.LendablePercent, nil + return nominalConcurrencyShares, limiter.LendablePercent, nil } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 2a4bf10f7bb..05f4f5e5392 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -33,8 +33,8 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/clock" - flowcontrol "k8s.io/api/flowcontrol/v1" - flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" + flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" ) // ConfigConsumerAsFieldManager is how the config consuminng @@ -88,7 +88,7 @@ type Interface interface { // New creates a new instance to implement API priority and fairness func New( informerFactory kubeinformers.SharedInformerFactory, - flowcontrolClient flowcontrolclient.FlowcontrolV1Interface, + flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface, serverConcurrencyLimit int, ) Interface { clk := eventclock.Real{} @@ -132,7 +132,7 @@ type TestableConfig struct { InformerFactory kubeinformers.SharedInformerFactory // FlowcontrolClient to use for manipulating config objects - FlowcontrolClient flowcontrolclient.FlowcontrolV1Interface + FlowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go index 79d19d136fc..d3bda40aaa7 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/promise/promise.go @@ -17,13 +17,12 @@ limitations under the License. 
package promise import ( - "context" "sync" ) // promise implements the WriteOnce interface. type promise struct { - doneCtx context.Context + doneCh <-chan struct{} doneVal interface{} setCh chan struct{} onceler sync.Once @@ -36,12 +35,12 @@ var _ WriteOnce = &promise{} // // If `initial` is non-nil then that value is Set at creation time. // -// If a `Get` is waiting soon after the channel associated with the -// `doneCtx` becomes selectable (which never happens for the nil -// channel) then `Set(doneVal)` effectively happens at that time. -func NewWriteOnce(initial interface{}, doneCtx context.Context, doneVal interface{}) WriteOnce { +// If a `Get` is waiting soon after `doneCh` becomes selectable (which +// never happens for the nil channel) then `Set(doneVal)` effectively +// happens at that time. +func NewWriteOnce(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) WriteOnce { p := &promise{ - doneCtx: doneCtx, + doneCh: doneCh, doneVal: doneVal, setCh: make(chan struct{}), } @@ -54,7 +53,7 @@ func NewWriteOnce(initial interface{}, doneCtx context.Context, doneVal interfac func (p *promise) Get() interface{} { select { case <-p.setCh: - case <-p.doneCtx.Done(): + case <-p.doneCh: p.Set(p.doneVal) } return p.value diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index b675bb5453c..604e0862ab2 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -53,7 +53,7 @@ type queueSetFactory struct { // - whose Set method is invoked with the queueSet locked, and // - whose Get method is invoked with the queueSet not locked. // The parameters are the same as for `promise.NewWriteOnce`. 
-type promiseFactory func(initial interface{}, doneCtx context.Context, doneVal interface{}) promise.WriteOnce +type promiseFactory func(initial interface{}, doneCh <-chan struct{}, doneVal interface{}) promise.WriteOnce // promiseFactoryFactory returns the promiseFactory to use for the given queueSet type promiseFactoryFactory func(*queueSet) promiseFactory @@ -567,7 +567,7 @@ func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, wo fsName: fsName, flowDistinguisher: flowDistinguisher, ctx: ctx, - decision: qs.promiseFactory(nil, ctx, decisionCancel), + decision: qs.promiseFactory(nil, ctx.Done(), decisionCancel), arrivalTime: qs.clock.Now(), arrivalR: qs.currentR, queue: queue, @@ -670,7 +670,7 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f flowDistinguisher: flowDistinguisher, ctx: ctx, startTime: now, - decision: qs.promiseFactory(decisionExecute, ctx, decisionCancel), + decision: qs.promiseFactory(decisionExecute, ctx.Done(), decisionCancel), arrivalTime: now, arrivalR: qs.currentR, descr1: descr1, diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go index ced0eac3121..49444237385 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/format/formatting.go @@ -21,7 +21,7 @@ import ( "encoding/json" "fmt" - flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" ) @@ -93,7 +93,7 @@ func FmtPriorityLevelConfiguration(pl *flowcontrol.PriorityLevelConfiguration) s return "nil" } var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("&flowcontrolv1.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.PriorityLevelConfiguration{ObjectMeta: %#+v, Spec: ", pl.ObjectMeta)) BufferPriorityLevelConfigurationSpec(&buf, &pl.Spec) buf.WriteString(fmt.Sprintf(", Status: %#+v}", pl.Status)) @@ -111,7 +111,7 @@ func FmtPriorityLevelConfigurationSpec(plSpec *flowcontrol.PriorityLevelConfigur // BufferPriorityLevelConfigurationSpec writes a golang source // expression for the given value to the given buffer func BufferPriorityLevelConfigurationSpec(buf *bytes.Buffer, plSpec *flowcontrol.PriorityLevelConfigurationSpec) { - buf.WriteString(fmt.Sprintf("flowcontrolv1.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) + buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.PriorityLevelConfigurationSpec{Type: %#v", plSpec.Type)) if plSpec.Limited != nil { buf.WriteString(fmt.Sprintf(", Limited: &flowcontrol.LimitedPriorityLevelConfiguration{NominalConcurrencyShares:%d, LimitResponse:flowcontrol.LimitResponse{Type:%#v", plSpec.Limited.NominalConcurrencyShares, plSpec.Limited.LimitResponse.Type)) if plSpec.Limited.LimitResponse.Queuing != nil { @@ -128,7 +128,7 @@ func FmtFlowSchema(fs *flowcontrol.FlowSchema) string { return "nil" } var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("&flowcontrolv1.FlowSchema{ObjectMeta: %#+v, Spec: ", + buf.WriteString(fmt.Sprintf("&flowcontrolv1beta3.FlowSchema{ObjectMeta: %#+v, Spec: ", fs.ObjectMeta)) BufferFlowSchemaSpec(&buf, &fs.Spec) buf.WriteString(fmt.Sprintf(", Status: %#+v}", fs.Status)) @@ -146,7 +146,7 @@ func FmtFlowSchemaSpec(fsSpec *flowcontrol.FlowSchemaSpec) string { // BufferFlowSchemaSpec writes a golang source expression for the // given value to the given buffer func 
BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) { - buf.WriteString(fmt.Sprintf("flowcontrolv1.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", + buf.WriteString(fmt.Sprintf("flowcontrolv1beta3.FlowSchemaSpec{PriorityLevelConfiguration: %#+v, MatchingPrecedence: %d, DistinguisherMethod: ", fsSpec.PriorityLevelConfiguration, fsSpec.MatchingPrecedence)) if fsSpec.DistinguisherMethod == nil { @@ -166,7 +166,7 @@ func BufferFlowSchemaSpec(buf *bytes.Buffer, fsSpec *flowcontrol.FlowSchemaSpec) // FmtPolicyRulesWithSubjects produces a golang source expression of the value. func FmtPolicyRulesWithSubjects(rule flowcontrol.PolicyRulesWithSubjects) string { - return "flowcontrolv1.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) + return "flowcontrolv1beta3.PolicyRulesWithSubjects" + FmtPolicyRulesWithSubjectsSlim(rule) } // FmtPolicyRulesWithSubjectsSlim produces a golang source expression @@ -182,7 +182,7 @@ func FmtPolicyRulesWithSubjectsSlim(rule flowcontrol.PolicyRulesWithSubjects) st // expression for the given value to the given buffer but excludes the // leading type name func BufferFmtPolicyRulesWithSubjectsSlim(buf *bytes.Buffer, rule flowcontrol.PolicyRulesWithSubjects) { - buf.WriteString("{Subjects: []flowcontrolv1.Subject{") + buf.WriteString("{Subjects: []flowcontrolv1beta3.Subject{") for jdx, subj := range rule.Subjects { if jdx > 0 { buf.WriteString(", ") diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go index 6b941cb7fe1..8d20867d6dd 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/request/list_work_estimator.go @@ -117,7 +117,8 @@ func (e *listWorkEstimator) estimate(r *http.Request, flowSchemaName, priorityLe } limit := numStored - if listOptions.Limit > 0 && listOptions.Limit < numStored { + if utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) && listOptions.Limit > 0 && + listOptions.Limit < numStored { limit = listOptions.Limit } @@ -164,14 +165,15 @@ func key(requestInfo *apirequest.RequestInfo) string { func shouldListFromStorage(query url.Values, opts *metav1.ListOptions) bool { resourceVersion := opts.ResourceVersion match := opts.ResourceVersionMatch + pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking) consistentListFromCacheEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) // Serve consistent reads from storage if ConsistentListFromCache is disabled consistentReadFromStorage := resourceVersion == "" && !consistentListFromCacheEnabled // Watch cache doesn't support continuations, so serve them from etcd. - hasContinuation := len(opts.Continue) > 0 + hasContinuation := pagingEnabled && len(opts.Continue) > 0 // Serve paginated requests about revision "0" from watch cache to avoid overwhelming etcd. - hasLimit := opts.Limit > 0 && resourceVersion != "0" + hasLimit := pagingEnabled && opts.Limit > 0 && resourceVersion != "0" // Watch cache only supports ResourceVersionMatchNotOlderThan (default). 
unsupportedMatch := match != "" && match != metav1.ResourceVersionMatchNotOlderThan diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go index 03c18b8e127..a404d3286e4 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/rule.go @@ -19,7 +19,7 @@ package flowcontrol import ( "strings" - flowcontrol "k8s.io/api/flowcontrol/v1" + flowcontrol "k8s.io/api/flowcontrol/v1beta3" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go index 0816b45a102..95e4060bd11 100644 --- a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -18,9 +18,9 @@ package webhook import ( "fmt" + "io/ioutil" "net" "net/http" - "os" "strconv" "strings" "time" @@ -233,7 +233,7 @@ func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Conf config.BearerToken = configAuthInfo.Token config.BearerTokenFile = configAuthInfo.TokenFile } else if len(configAuthInfo.TokenFile) > 0 { - tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile) + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go index 29ee0e84d15..191b3731850 100644 --- a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -31,13 +31,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/cache" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/apis/apiserver" - apiservervalidation "k8s.io/apiserver/pkg/apis/apiserver/validation" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/authorization/authorizer" - authorizationcel "k8s.io/apiserver/pkg/authorization/cel" - "k8s.io/apiserver/pkg/features" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/client-go/kubernetes/scheme" authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -71,12 +66,11 @@ type WebhookAuthorizer struct { retryBackoff wait.Backoff decisionOnError authorizer.Decision metrics AuthorizerMetrics - celMatcher *authorizationcel.CELMatcher } // NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client -func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { - return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, nil, metrics) +func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { + return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, metrics) } // New creates a new WebhookAuthorizer from the provided kubeconfig file. 
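The `list_work_estimator.go` hunk above decides whether a LIST request must bypass the watch cache and be served from etcd. Below is a minimal sketch of that decision shape, assuming the function ends by OR-ing the four conditions (the return statement falls outside the hunk); feature gates are replaced by plain booleans and the `listOptions` struct is an illustrative stand-in for `metav1.ListOptions`.

```go
// Sketch of the "serve from watch cache or from etcd" decision.
package main

import "fmt"

type listOptions struct {
	ResourceVersion      string
	ResourceVersionMatch string
	Continue             string
	Limit                int64
}

// shouldListFromStorage returns true when the request cannot be served from
// the watch cache and must go to etcd.
func shouldListFromStorage(opts listOptions, pagingEnabled, consistentListFromCacheEnabled bool) bool {
	// Consistent reads (empty resourceVersion) go to storage unless the
	// ConsistentListFromCache gate allows serving them from the cache.
	consistentReadFromStorage := opts.ResourceVersion == "" && !consistentListFromCacheEnabled
	// The watch cache does not support continuation tokens.
	hasContinuation := pagingEnabled && len(opts.Continue) > 0
	// Paginated requests go to storage, except resourceVersion "0", which is
	// served from the cache to avoid overwhelming etcd.
	hasLimit := pagingEnabled && opts.Limit > 0 && opts.ResourceVersion != "0"
	// The watch cache only supports the default NotOlderThan match semantics.
	unsupportedMatch := opts.ResourceVersionMatch != "" && opts.ResourceVersionMatch != "NotOlderThan"

	return consistentReadFromStorage || hasContinuation || hasLimit || unsupportedMatch
}

func main() {
	fmt.Println(shouldListFromStorage(listOptions{Continue: "token"}, true, false))                // true: continuations go to etcd
	fmt.Println(shouldListFromStorage(listOptions{ResourceVersion: "0", Limit: 500}, true, false)) // false: servable from the watch cache
}
```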
@@ -98,33 +92,27 @@ func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1I // // For additional HTTP configuration, refer to the kubeconfig documentation // https://kubernetes.io/docs/user-guide/kubeconfig-file/. -func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition) (*WebhookAuthorizer, error) { +func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff) (*WebhookAuthorizer, error) { subjectAccessReview, err := subjectAccessReviewInterfaceFromConfig(config, version, retryBackoff) if err != nil { return nil, err } - return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, matchConditions, AuthorizerMetrics{ + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, AuthorizerMetrics{ RecordRequestTotal: noopMetrics{}.RecordRequestTotal, RecordRequestLatency: noopMetrics{}.RecordRequestLatency, }) } // newWithBackoff allows tests to skip the sleep. -func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { - // compile all expressions once in validation and save the results to be used for eval later - cm, fieldErr := apiservervalidation.ValidateAndCompileMatchConditions(matchConditions) - if err := fieldErr.ToAggregate(); err != nil { - return nil, err - } +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, metrics AuthorizerMetrics) (*WebhookAuthorizer, error) { return &WebhookAuthorizer{ subjectAccessReview: subjectAccessReview, responseCache: cache.NewLRUExpireCache(8192), authorizedTTL: authorizedTTL, unauthorizedTTL: unauthorizedTTL, retryBackoff: retryBackoff, - decisionOnError: decisionOnError, + decisionOnError: authorizer.DecisionNoOpinion, metrics: metrics, - celMatcher: cm, }, nil } @@ -202,24 +190,6 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri Verb: attr.GetVerb(), } } - // skipping match when feature is not enabled - if utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration) { - // Process Match Conditions before calling the webhook - matches, err := w.match(ctx, r) - // If at least one matchCondition evaluates to an error (but none are FALSE): - // If failurePolicy=Deny, then the webhook rejects the request - // If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped - if err != nil { - return w.decisionOnError, "", err - } - // If at least one matchCondition successfully evaluates to FALSE, - // then the webhook is skipped. - if !matches { - return authorizer.DecisionNoOpinion, "", nil - } - } - // If all evaluated successfully and ALL matchConditions evaluate to TRUE, - // then the webhook is called. 
key, err := json.Marshal(r.Spec) if err != nil { return w.decisionOnError, "", err @@ -286,18 +256,6 @@ func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]author return resourceRules, nonResourceRules, incomplete, fmt.Errorf("webhook authorizer does not support user rule resolution") } -// Match is used to evaluate the SubjectAccessReviewSpec against -// the authorizer's matchConditions in the form of cel expressions -// to return match or no match found, which then is used to -// determine if the webhook should be skipped. -func (w *WebhookAuthorizer) match(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) { - // A nil celMatcher or zero saved CompilationResults matches all requests. - if w.celMatcher == nil || w.celMatcher.CompilationResults == nil { - return true, nil - } - return w.celMatcher.Eval(ctx, r) -} - func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.ExtraValue { if extra == nil { return nil diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go deleted file mode 100644 index 5aa686782b4..00000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ClusterTrustBundleProjectionApplyConfiguration represents an declarative configuration of the ClusterTrustBundleProjection type for use -// with apply. -type ClusterTrustBundleProjectionApplyConfiguration struct { - Name *string `json:"name,omitempty"` - SignerName *string `json:"signerName,omitempty"` - LabelSelector *v1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"` - Optional *bool `json:"optional,omitempty"` - Path *string `json:"path,omitempty"` -} - -// ClusterTrustBundleProjectionApplyConfiguration constructs an declarative configuration of the ClusterTrustBundleProjection type for use with -// apply. -func ClusterTrustBundleProjection() *ClusterTrustBundleProjectionApplyConfiguration { - return &ClusterTrustBundleProjectionApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ClusterTrustBundleProjectionApplyConfiguration) WithName(value string) *ClusterTrustBundleProjectionApplyConfiguration { - b.Name = &value - return b -} - -// WithSignerName sets the SignerName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the SignerName field is set to the value of the last call. -func (b *ClusterTrustBundleProjectionApplyConfiguration) WithSignerName(value string) *ClusterTrustBundleProjectionApplyConfiguration { - b.SignerName = &value - return b -} - -// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LabelSelector field is set to the value of the last call. -func (b *ClusterTrustBundleProjectionApplyConfiguration) WithLabelSelector(value *v1.LabelSelectorApplyConfiguration) *ClusterTrustBundleProjectionApplyConfiguration { - b.LabelSelector = value - return b -} - -// WithOptional sets the Optional field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Optional field is set to the value of the last call. -func (b *ClusterTrustBundleProjectionApplyConfiguration) WithOptional(value bool) *ClusterTrustBundleProjectionApplyConfiguration { - b.Optional = &value - return b -} - -// WithPath sets the Path field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Path field is set to the value of the last call. -func (b *ClusterTrustBundleProjectionApplyConfiguration) WithPath(value string) *ClusterTrustBundleProjectionApplyConfiguration { - b.Path = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go index e4ae9c49f79..6e373dd4ed1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go @@ -24,7 +24,6 @@ type LifecycleHandlerApplyConfiguration struct { Exec *ExecActionApplyConfiguration `json:"exec,omitempty"` HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"` TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"` - Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"` } // LifecycleHandlerApplyConfiguration constructs an declarative configuration of the LifecycleHandler type for use with @@ -56,11 +55,3 @@ func (b *LifecycleHandlerApplyConfiguration) WithTCPSocket(value *TCPSocketActio b.TCPSocket = value return b } - -// WithSleep sets the Sleep field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Sleep field is set to the value of the last call. -func (b *LifecycleHandlerApplyConfiguration) WithSleep(value *SleepActionApplyConfiguration) *LifecycleHandlerApplyConfiguration { - b.Sleep = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go index a48dac68107..64d27bdad50 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go @@ -18,16 +18,11 @@ limitations under the License. 
package v1 -import ( - v1 "k8s.io/api/core/v1" -) - // LoadBalancerIngressApplyConfiguration represents an declarative configuration of the LoadBalancerIngress type for use // with apply. type LoadBalancerIngressApplyConfiguration struct { IP *string `json:"ip,omitempty"` Hostname *string `json:"hostname,omitempty"` - IPMode *v1.LoadBalancerIPMode `json:"ipMode,omitempty"` Ports []PortStatusApplyConfiguration `json:"ports,omitempty"` } @@ -53,14 +48,6 @@ func (b *LoadBalancerIngressApplyConfiguration) WithHostname(value string) *Load return b } -// WithIPMode sets the IPMode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPMode field is set to the value of the last call. -func (b *LoadBalancerIngressApplyConfiguration) WithIPMode(value v1.LoadBalancerIPMode) *LoadBalancerIngressApplyConfiguration { - b.IPMode = &value - return b -} - // WithPorts adds the given value to the Ports field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Ports field. diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go deleted file mode 100644 index 4ff1d040cf3..00000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// ModifyVolumeStatusApplyConfiguration represents an declarative configuration of the ModifyVolumeStatus type for use -// with apply. -type ModifyVolumeStatusApplyConfiguration struct { - TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"` - Status *v1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"` -} - -// ModifyVolumeStatusApplyConfiguration constructs an declarative configuration of the ModifyVolumeStatus type for use with -// apply. -func ModifyVolumeStatus() *ModifyVolumeStatusApplyConfiguration { - return &ModifyVolumeStatusApplyConfiguration{} -} - -// WithTargetVolumeAttributesClassName sets the TargetVolumeAttributesClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the TargetVolumeAttributesClassName field is set to the value of the last call. 
-func (b *ModifyVolumeStatusApplyConfiguration) WithTargetVolumeAttributesClassName(value string) *ModifyVolumeStatusApplyConfiguration { - b.TargetVolumeAttributesClassName = &value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *ModifyVolumeStatusApplyConfiguration) WithStatus(value v1.PersistentVolumeClaimModifyVolumeStatus) *ModifyVolumeStatusApplyConfiguration { - b.Status = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go index 4db12fadb38..f324584aba2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go @@ -26,15 +26,14 @@ import ( // PersistentVolumeClaimSpecApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimSpec type for use // with apply. type PersistentVolumeClaimSpecApplyConfiguration struct { - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` - Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"` - VolumeName *string `json:"volumeName,omitempty"` - StorageClassName *string `json:"storageClassName,omitempty"` - VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` - DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` - DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` - VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"` + Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"` + VolumeName *string `json:"volumeName,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` + VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` + DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"` + DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"` } // PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with @@ -64,7 +63,7 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithSelector(value *metav1 // WithResources sets the Resources field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Resources field is set to the value of the last call. 
-func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *VolumeResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { +func (b *PersistentVolumeClaimSpecApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration { b.Resources = value return b } @@ -108,11 +107,3 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *T b.DataSourceRef = value return b } - -// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. -func (b *PersistentVolumeClaimSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeClaimSpecApplyConfiguration { - b.VolumeAttributesClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go index 1f6d5ae3238..c29b2a9a155 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go @@ -25,14 +25,12 @@ import ( // PersistentVolumeClaimStatusApplyConfiguration represents an declarative configuration of the PersistentVolumeClaimStatus type for use // with apply. type PersistentVolumeClaimStatusApplyConfiguration struct { - Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` - AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` - AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` - AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` - CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"` - ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"` + Phase *v1.PersistentVolumeClaimPhase `json:"phase,omitempty"` + AccessModes []v1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + Capacity *v1.ResourceList `json:"capacity,omitempty"` + Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"` + AllocatedResources *v1.ResourceList `json:"allocatedResources,omitempty"` + AllocatedResourceStatuses map[v1.ResourceName]v1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"` } // PersistentVolumeClaimStatusApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimStatus type for use with @@ -101,19 +99,3 @@ func (b *PersistentVolumeClaimStatusApplyConfiguration) WithAllocatedResourceSta } return b } - -// WithCurrentVolumeAttributesClassName sets the CurrentVolumeAttributesClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CurrentVolumeAttributesClassName field is set to the value of the last call. 
-func (b *PersistentVolumeClaimStatusApplyConfiguration) WithCurrentVolumeAttributesClassName(value string) *PersistentVolumeClaimStatusApplyConfiguration { - b.CurrentVolumeAttributesClassName = &value - return b -} - -// WithModifyVolumeStatus sets the ModifyVolumeStatus field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ModifyVolumeStatus field is set to the value of the last call. -func (b *PersistentVolumeClaimStatusApplyConfiguration) WithModifyVolumeStatus(value *ModifyVolumeStatusApplyConfiguration) *PersistentVolumeClaimStatusApplyConfiguration { - b.ModifyVolumeStatus = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go index 8a30dab6495..b3a72b1c3ef 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go @@ -34,7 +34,6 @@ type PersistentVolumeSpecApplyConfiguration struct { MountOptions []string `json:"mountOptions,omitempty"` VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"` NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"` - VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"` } // PersistentVolumeSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeSpec type for use with @@ -286,11 +285,3 @@ func (b *PersistentVolumeSpecApplyConfiguration) WithNodeAffinity(value *VolumeN b.NodeAffinity = value return b } - -// WithVolumeAttributesClassName sets the VolumeAttributesClassName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the VolumeAttributesClassName field is set to the value of the last call. -func (b *PersistentVolumeSpecApplyConfiguration) WithVolumeAttributesClassName(value string) *PersistentVolumeSpecApplyConfiguration { - b.VolumeAttributesClassName = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go index ac1eab3d8c7..7d2492203ee 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go @@ -29,8 +29,6 @@ type PodAffinityTermApplyConfiguration struct { Namespaces []string `json:"namespaces,omitempty"` TopologyKey *string `json:"topologyKey,omitempty"` NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - MatchLabelKeys []string `json:"matchLabelKeys,omitempty"` - MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"` } // PodAffinityTermApplyConfiguration constructs an declarative configuration of the PodAffinityTerm type for use with @@ -72,23 +70,3 @@ func (b *PodAffinityTermApplyConfiguration) WithNamespaceSelector(value *v1.Labe b.NamespaceSelector = value return b } - -// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field. -func (b *PodAffinityTermApplyConfiguration) WithMatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { - for i := range values { - b.MatchLabelKeys = append(b.MatchLabelKeys, values[i]) - } - return b -} - -// WithMismatchLabelKeys adds the given value to the MismatchLabelKeys field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the MismatchLabelKeys field. -func (b *PodAffinityTermApplyConfiguration) WithMismatchLabelKeys(values ...string) *PodAffinityTermApplyConfiguration { - for i := range values { - b.MismatchLabelKeys = append(b.MismatchLabelKeys, values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go index a2ef0a9943d..8d16ea79eb2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go +++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go @@ -25,7 +25,6 @@ type VolumeProjectionApplyConfiguration struct { DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"` ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"` ServiceAccountToken *ServiceAccountTokenProjectionApplyConfiguration `json:"serviceAccountToken,omitempty"` - ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"` } // VolumeProjectionApplyConfiguration constructs an declarative configuration of the VolumeProjection type for use with @@ -65,11 +64,3 @@ func (b *VolumeProjectionApplyConfiguration) WithServiceAccountToken(value *Serv b.ServiceAccountToken = value return b } - -// WithClusterTrustBundle sets the ClusterTrustBundle field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ClusterTrustBundle field is set to the value of the last call. -func (b *VolumeProjectionApplyConfiguration) WithClusterTrustBundle(value *ClusterTrustBundleProjectionApplyConfiguration) *VolumeProjectionApplyConfiguration { - b.ClusterTrustBundle = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go deleted file mode 100644 index 89ad1da8b33..00000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" -) - -// VolumeResourceRequirementsApplyConfiguration represents an declarative configuration of the VolumeResourceRequirements type for use -// with apply. -type VolumeResourceRequirementsApplyConfiguration struct { - Limits *v1.ResourceList `json:"limits,omitempty"` - Requests *v1.ResourceList `json:"requests,omitempty"` -} - -// VolumeResourceRequirementsApplyConfiguration constructs an declarative configuration of the VolumeResourceRequirements type for use with -// apply. -func VolumeResourceRequirements() *VolumeResourceRequirementsApplyConfiguration { - return &VolumeResourceRequirementsApplyConfiguration{} -} - -// WithLimits sets the Limits field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Limits field is set to the value of the last call. -func (b *VolumeResourceRequirementsApplyConfiguration) WithLimits(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { - b.Limits = &value - return b -} - -// WithRequests sets the Requests field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Requests field is set to the value of the last call. -func (b *VolumeResourceRequirementsApplyConfiguration) WithRequests(value v1.ResourceList) *VolumeResourceRequirementsApplyConfiguration { - b.Requests = &value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go index cd21214f5ad..3535d747877 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/exemptprioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // ExemptPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the ExemptPriorityLevelConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go similarity index 87% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go index d9c8a79cc88..507f8e9abe7 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowdistinguishermethod.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. 
-package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" ) // FlowDistinguisherMethodApplyConfiguration represents an declarative configuration of the FlowDistinguisherMethod type for use // with apply. type FlowDistinguisherMethodApplyConfiguration struct { - Type *v1.FlowDistinguisherMethodType `json:"type,omitempty"` + Type *v1alpha1.FlowDistinguisherMethodType `json:"type,omitempty"` } // FlowDistinguisherMethodApplyConfiguration constructs an declarative configuration of the FlowDistinguisherMethod type for use with @@ -37,7 +37,7 @@ func FlowDistinguisherMethod() *FlowDistinguisherMethodApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { +func (b *FlowDistinguisherMethodApplyConfiguration) WithType(value v1alpha1.FlowDistinguisherMethodType) *FlowDistinguisherMethodApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go index 8809fafbaeb..20251d08bff 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { b := &FlowSchemaApplyConfiguration{} b.WithName(name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") return b } @@ -57,27 +57,27 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration { // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "") } // ExtractFlowSchemaStatus is the same as ExtractFlowSchema except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractFlowSchemaStatus(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { +func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) { return extractFlowSchema(flowSchema, fieldManager, "status") } -func extractFlowSchema(flowSchema *apiflowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { +func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) { b := &FlowSchemaApplyConfiguration{} - err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource) + err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(flowSchema.Name) b.WithKind("FlowSchema") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go index 808ab09a551..31f5dc13ed3 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemacondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // FlowSchemaConditionApplyConfiguration represents an declarative configuration of the FlowSchemaCondition type for use // with apply. type FlowSchemaConditionApplyConfiguration struct { - Type *v1.FlowSchemaConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1alpha1.FlowSchemaConditionType `json:"type,omitempty"` + Status *v1alpha1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // FlowSchemaConditionApplyConfiguration constructs an declarative configuration of the FlowSchemaCondition type for use with @@ -42,7 +42,7 @@ func FlowSchemaCondition() *FlowSchemaConditionApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1alpha1.FlowSchemaConditionType) *FlowSchemaConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithType(value v1.FlowSchemaCond // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *FlowSchemaConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *FlowSchemaConditionApplyConfiguration) WithStatus(value v1.ConditionSta // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. -func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *FlowSchemaConditionApplyConfiguration { +func (b *FlowSchemaConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *FlowSchemaConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go index 2785f5baf3b..fd5fc0ae9aa 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemaspec.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // FlowSchemaSpecApplyConfiguration represents an declarative configuration of the FlowSchemaSpec type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go index 7c61360a535..db2dacf13af 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschemastatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // FlowSchemaStatusApplyConfiguration represents an declarative configuration of the FlowSchemaStatus type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go index 92a03d86282..0421f3f5999 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/groupsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // GroupSubjectApplyConfiguration represents an declarative configuration of the GroupSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go similarity index 90% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go index c19f0970357..10660e81aaf 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitedprioritylevelconfiguration.go @@ -16,12 +16,12 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // LimitedPriorityLevelConfigurationApplyConfiguration represents an declarative configuration of the LimitedPriorityLevelConfiguration type for use // with apply. type LimitedPriorityLevelConfigurationApplyConfiguration struct { - NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"` + AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"` LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"` LendablePercent *int32 `json:"lendablePercent,omitempty"` BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"` @@ -33,11 +33,11 @@ func LimitedPriorityLevelConfiguration() *LimitedPriorityLevelConfigurationApply return &LimitedPriorityLevelConfigurationApplyConfiguration{} } -// WithNominalConcurrencyShares sets the NominalConcurrencyShares field in the declarative configuration to the given value +// WithAssuredConcurrencyShares sets the AssuredConcurrencyShares field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NominalConcurrencyShares field is set to the value of the last call. -func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithNominalConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { - b.NominalConcurrencyShares = &value +// If called multiple times, the AssuredConcurrencyShares field is set to the value of the last call. 
+func (b *LimitedPriorityLevelConfigurationApplyConfiguration) WithAssuredConcurrencyShares(value int32) *LimitedPriorityLevelConfigurationApplyConfiguration { + b.AssuredConcurrencyShares = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go similarity index 88% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go index 03ff6d91035..5edaa025cdb 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/limitresponse.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" ) // LimitResponseApplyConfiguration represents an declarative configuration of the LimitResponse type for use // with apply. type LimitResponseApplyConfiguration struct { - Type *v1.LimitResponseType `json:"type,omitempty"` + Type *v1alpha1.LimitResponseType `json:"type,omitempty"` Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"` } @@ -38,7 +38,7 @@ func LimitResponse() *LimitResponseApplyConfiguration { // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *LimitResponseApplyConfiguration) WithType(value v1.LimitResponseType) *LimitResponseApplyConfiguration { +func (b *LimitResponseApplyConfiguration) WithType(value v1alpha1.LimitResponseType) *LimitResponseApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go index d9f8c2eccf6..b1f09f5304c 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/nonresourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // NonResourcePolicyRuleApplyConfiguration represents an declarative configuration of the NonResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go index b193efa8bf0..84110406446 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/policyruleswithsubjects.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // PolicyRulesWithSubjectsApplyConfiguration represents an declarative configuration of the PolicyRulesWithSubjects type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go similarity index 94% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go index e8a1b97c9f8..a40db75dcb5 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - apiflowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -42,7 +42,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon b := &PriorityLevelConfigurationApplyConfiguration{} b.WithName(name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") return b } @@ -57,27 +57,27 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "") } // ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) { return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status") } -func extractPriorityLevelConfiguration(priorityLevelConfiguration *apiflowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { +func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) { b := &PriorityLevelConfigurationApplyConfiguration{} - err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource) + err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource) if err != nil { return nil, err } b.WithName(priorityLevelConfiguration.Name) b.WithKind("PriorityLevelConfiguration") - b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1") + b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1alpha1") return b, nil } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go similarity index 81% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go index 6ce588c8d94..bd91b80f21f 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationcondition.go @@ -16,21 +16,21 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // PriorityLevelConfigurationConditionApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationCondition type for use // with apply. 
type PriorityLevelConfigurationConditionApplyConfiguration struct { - Type *v1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` - Status *v1.ConditionStatus `json:"status,omitempty"` - LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` - Reason *string `json:"reason,omitempty"` - Message *string `json:"message,omitempty"` + Type *v1alpha1.PriorityLevelConfigurationConditionType `json:"type,omitempty"` + Status *v1alpha1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` } // PriorityLevelConfigurationConditionApplyConfiguration constructs an declarative configuration of the PriorityLevelConfigurationCondition type for use with @@ -42,7 +42,7 @@ func PriorityLevelConfigurationCondition() *PriorityLevelConfigurationConditionA // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v1alpha1.PriorityLevelConfigurationConditionType) *PriorityLevelConfigurationConditionApplyConfiguration { b.Type = &value return b } @@ -50,7 +50,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithType(value v // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value v1alpha1.ConditionStatus) *PriorityLevelConfigurationConditionApplyConfiguration { b.Status = &value return b } @@ -58,7 +58,7 @@ func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithStatus(value // WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the LastTransitionTime field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { +func (b *PriorityLevelConfigurationConditionApplyConfiguration) WithLastTransitionTime(value v1.Time) *PriorityLevelConfigurationConditionApplyConfiguration { b.LastTransitionTime = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go index 0638aee8b80..b477c04df53 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationreference.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // PriorityLevelConfigurationReferenceApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationReference type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go index 5d88749593c..ade920a7556 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationspec.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" ) // PriorityLevelConfigurationSpecApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationSpec type for use // with apply. type PriorityLevelConfigurationSpecApplyConfiguration struct { - Type *v1.PriorityLevelEnablement `json:"type,omitempty"` + Type *v1alpha1.PriorityLevelEnablement `json:"type,omitempty"` Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"` Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"` } @@ -39,7 +39,7 @@ func PriorityLevelConfigurationSpec() *PriorityLevelConfigurationSpecApplyConfig // WithType sets the Type field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Type field is set to the value of the last call. 
-func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { +func (b *PriorityLevelConfigurationSpecApplyConfiguration) WithType(value v1alpha1.PriorityLevelEnablement) *PriorityLevelConfigurationSpecApplyConfiguration { b.Type = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go index 322871edc69..eb3ef3d61d1 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfigurationstatus.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // PriorityLevelConfigurationStatusApplyConfiguration represents an declarative configuration of the PriorityLevelConfigurationStatus type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go index 69fd2c23ccb..0fccc3f08be 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/queuingconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // QueuingConfigurationApplyConfiguration represents an declarative configuration of the QueuingConfiguration type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go index 0991ce94454..d2c6f4eed6d 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/resourcepolicyrule.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // ResourcePolicyRuleApplyConfiguration represents an declarative configuration of the ResourcePolicyRule type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go similarity index 99% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go index 55787ca7673..270b5225e14 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/serviceaccountsubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // ServiceAccountSubjectApplyConfiguration represents an declarative configuration of the ServiceAccountSubject type for use // with apply. diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go similarity index 92% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go index f02b03bdc7c..83c09d644b2 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go @@ -16,16 +16,16 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" ) // SubjectApplyConfiguration represents an declarative configuration of the Subject type for use // with apply. type SubjectApplyConfiguration struct { - Kind *v1.SubjectKind `json:"kind,omitempty"` + Kind *v1alpha1.SubjectKind `json:"kind,omitempty"` User *UserSubjectApplyConfiguration `json:"user,omitempty"` Group *GroupSubjectApplyConfiguration `json:"group,omitempty"` ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"` @@ -40,7 +40,7 @@ func Subject() *SubjectApplyConfiguration { // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *SubjectApplyConfiguration) WithKind(value v1.SubjectKind) *SubjectApplyConfiguration { +func (b *SubjectApplyConfiguration) WithKind(value v1alpha1.SubjectKind) *SubjectApplyConfiguration { b.Kind = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go similarity index 98% rename from vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go rename to vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go index 2d17c111c6a..a762c249e09 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go +++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/usersubject.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1alpha1 // UserSubjectApplyConfiguration represents an declarative configuration of the UserSubject type for use // with apply. 
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go index 2ceb262217d..3ed553662f6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -1013,6 +1013,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1074,6 +1075,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ + default: {} - name: kind type: scalar: string @@ -1112,6 +1114,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1224,9 +1227,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1338,6 +1343,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1449,6 +1455,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1579,6 +1586,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ + default: {} - name: kind type: scalar: string @@ -1617,9 +1625,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1758,6 +1768,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -1888,6 +1899,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: data type: namedType: __untyped_atomic_ + default: {} - name: kind type: scalar: string @@ -1926,6 +1938,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -2038,9 +2051,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -2152,6 +2167,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -2263,6 +2279,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -2589,6 +2606,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -2842,6 +2860,7 @@ var schemaYAML = 
typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: name type: scalar: string @@ -2885,6 +2904,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: metricName type: scalar: string @@ -2919,6 +2939,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -3046,6 +3067,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus map: fields: @@ -3055,6 +3077,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: metricName type: scalar: string @@ -3079,12 +3102,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: io.k8s.api.autoscaling.v2beta1.PodsMetricStatus map: fields: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: metricName type: scalar: string @@ -3114,6 +3139,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: currentAverageValue type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: name type: scalar: string @@ -3250,6 +3276,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -3564,9 +3591,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -3847,9 +3876,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -3971,9 +4002,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -4371,25 +4404,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: timeoutSeconds type: scalar: numeric -- name: io.k8s.api.core.v1.ClusterTrustBundleProjection - map: - fields: - - name: labelSelector - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: name - type: - scalar: string - - name: optional - type: - scalar: boolean - - name: path - type: - scalar: string - default: "" - - name: signerName - type: - scalar: string - name: io.k8s.api.core.v1.ComponentCondition map: fields: @@ -4702,6 +4716,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: io.k8s.api.core.v1.ContainerStateTerminated map: fields: @@ -4715,6 +4730,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: finishedAt type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -4727,6 +4743,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: startedAt type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: io.k8s.api.core.v1.ContainerStateWaiting map: fields: @@ -5082,9 +5099,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: firstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: involvedObject type: namedType: io.k8s.api.core.v1.ObjectReference @@ -5095,6 +5114,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -5135,6 +5155,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: io.k8s.api.core.v1.EventSource map: fields: @@ -5317,6 +5338,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} - name: scheme type: scalar: string @@ -5475,9 +5497,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: httpGet type: namedType: io.k8s.api.core.v1.HTTPGetAction - - name: sleep - type: - namedType: io.k8s.api.core.v1.SleepAction - name: tcpSocket type: namedType: io.k8s.api.core.v1.TCPSocketAction @@ -5548,9 +5567,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: ip type: scalar: string - - name: ipMode - type: - scalar: string - name: ports type: list: @@ -5583,16 +5599,6 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.core.v1.ModifyVolumeStatus - map: - fields: - - name: status - type: - scalar: string - default: "" - - name: targetVolumeAttributesClassName - type: - scalar: string - name: io.k8s.api.core.v1.NFSVolumeSource map: fields: @@ -5634,6 +5640,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -5721,9 +5728,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastHeartbeatTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -6027,9 +6036,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -6061,7 +6072,7 @@ var schemaYAML = typed.YAMLObject(`types: namedType: io.k8s.api.core.v1.TypedObjectReference - name: resources type: - namedType: io.k8s.api.core.v1.VolumeResourceRequirements + namedType: io.k8s.api.core.v1.ResourceRequirements default: {} - name: selector type: @@ -6069,9 +6080,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageClassName type: scalar: string - - name: volumeAttributesClassName - type: - scalar: string - name: volumeMode type: scalar: string @@ -6111,12 +6119,6 @@ var schemaYAML = typed.YAMLObject(`types: elementRelationship: associative keys: - type - - name: currentVolumeAttributesClassName - type: - scalar: string - - name: modifyVolumeStatus - type: - namedType: 
io.k8s.api.core.v1.ModifyVolumeStatus - name: phase type: scalar: string @@ -6237,9 +6239,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: storageos type: namedType: io.k8s.api.core.v1.StorageOSPersistentVolumeSource - - name: volumeAttributesClassName - type: - scalar: string - name: volumeMode type: scalar: string @@ -6313,18 +6312,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: labelSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector - - name: matchLabelKeys - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: mismatchLabelKeys - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: namespaceSelector type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector @@ -6359,9 +6346,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastProbeTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -6971,6 +6960,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -7046,6 +7036,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: divisor type: namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} - name: resource type: scalar: string @@ -7468,6 +7459,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: targetPort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} - name: io.k8s.api.core.v1.ServiceSpec map: fields: @@ -7570,13 +7562,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: clientIP type: namedType: io.k8s.api.core.v1.ClientIPConfig -- name: io.k8s.api.core.v1.SleepAction - map: - fields: - - name: seconds - type: - scalar: numeric - default: 0 - name: io.k8s.api.core.v1.StorageOSPersistentVolumeSource map: fields: @@ -7633,6 +7618,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: port type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} - name: io.k8s.api.core.v1.Taint map: fields: @@ -7893,9 +7879,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.VolumeProjection map: fields: - - name: clusterTrustBundle - type: - namedType: io.k8s.api.core.v1.ClusterTrustBundleProjection - name: configMap type: namedType: io.k8s.api.core.v1.ConfigMapProjection @@ -7908,19 +7891,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: serviceAccountToken type: namedType: io.k8s.api.core.v1.ServiceAccountTokenProjection -- name: io.k8s.api.core.v1.VolumeResourceRequirements - map: - fields: - - name: limits - type: - map: - elementType: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - - name: requests - type: - map: - elementType: - namedType: io.k8s.apimachinery.pkg.api.resource.Quantity - name: io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource map: fields: @@ -8186,9 +8156,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8196,6 +8168,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: kind type: scalar: 
string @@ -8238,6 +8211,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: io.k8s.api.events.v1beta1.Event map: fields: @@ -8253,9 +8227,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: deprecatedFirstTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: deprecatedLastTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: deprecatedSource type: namedType: io.k8s.api.core.v1.EventSource @@ -8263,6 +8239,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: eventTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: kind type: scalar: string @@ -8305,6 +8282,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastObservedTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime + default: {} - name: io.k8s.api.extensions.v1beta1.DaemonSet map: fields: @@ -8332,6 +8310,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -8447,9 +8426,11 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: lastUpdateTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -8605,6 +8586,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} - name: io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress map: fields: @@ -8815,6 +8797,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -8896,7 +8879,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxUnavailable type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString -- name: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration map: fields: - name: lendablePercent @@ -8905,14 +8888,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: nominalConcurrencyShares type: scalar: numeric -- name: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod +- name: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod map: fields: - name: type type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1.FlowSchema +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchema map: fields: - name: apiVersion @@ -8927,18 +8910,19 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1.FlowSchemaSpec + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec default: {} - name: status type: - namedType: io.k8s.api.flowcontrol.v1.FlowSchemaStatus + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus default: {} -- name: io.k8s.api.flowcontrol.v1.FlowSchemaCondition +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -8951,50 +8935,50 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1.FlowSchemaSpec +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaSpec map: 
fields: - name: distinguisherMethod type: - namedType: io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowDistinguisherMethod - name: matchingPrecedence type: scalar: numeric default: 0 - name: priorityLevelConfiguration type: - namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference default: {} - name: rules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects + namedType: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1.FlowSchemaStatus +- name: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.FlowSchemaCondition + namedType: io.k8s.api.flowcontrol.v1alpha1.FlowSchemaCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1.GroupSubject +- name: io.k8s.api.flowcontrol.v1alpha1.GroupSubject map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1.LimitResponse +- name: io.k8s.api.flowcontrol.v1alpha1.LimitResponse map: fields: - name: queuing type: - namedType: io.k8s.api.flowcontrol.v1.QueuingConfiguration + namedType: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration - name: type type: scalar: string @@ -9004,9 +8988,13 @@ var schemaYAML = typed.YAMLObject(`types: fields: - fieldName: queuing discriminatorValue: Queuing -- name: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration map: fields: + - name: assuredConcurrencyShares + type: + scalar: numeric + default: 0 - name: borrowingLimitPercent type: scalar: numeric @@ -9015,12 +9003,9 @@ var schemaYAML = typed.YAMLObject(`types: scalar: numeric - name: limitResponse type: - namedType: io.k8s.api.flowcontrol.v1.LimitResponse + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitResponse default: {} - - name: nominalConcurrencyShares - type: - scalar: numeric -- name: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule map: fields: - name: nonResourceURLs @@ -9035,28 +9020,28 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects +- name: io.k8s.api.flowcontrol.v1alpha1.PolicyRulesWithSubjects map: fields: - name: nonResourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.NonResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1alpha1.NonResourcePolicyRule elementRelationship: atomic - name: resourceRules type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.ResourcePolicyRule + namedType: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule elementRelationship: atomic - name: subjects type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.Subject + namedType: io.k8s.api.flowcontrol.v1alpha1.Subject elementRelationship: atomic -- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration map: fields: - name: apiVersion @@ -9071,18 +9056,19 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec default: {} - 
name: status type: - namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus default: {} -- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition map: fields: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -9095,22 +9081,22 @@ var schemaYAML = typed.YAMLObject(`types: - name: type type: scalar: string -- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationReference map: fields: - name: name type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationSpec map: fields: - name: exempt type: - namedType: io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1alpha1.ExemptPriorityLevelConfiguration - name: limited type: - namedType: io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration + namedType: io.k8s.api.flowcontrol.v1alpha1.LimitedPriorityLevelConfiguration - name: type type: scalar: string @@ -9122,18 +9108,18 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: Exempt - fieldName: limited discriminatorValue: Limited -- name: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus +- name: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationStatus map: fields: - name: conditions type: list: elementType: - namedType: io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition + namedType: io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfigurationCondition elementRelationship: associative keys: - type -- name: io.k8s.api.flowcontrol.v1.QueuingConfiguration +- name: io.k8s.api.flowcontrol.v1alpha1.QueuingConfiguration map: fields: - name: handSize @@ -9148,7 +9134,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 -- name: io.k8s.api.flowcontrol.v1.ResourcePolicyRule +- name: io.k8s.api.flowcontrol.v1alpha1.ResourcePolicyRule map: fields: - name: apiGroups @@ -9178,7 +9164,7 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: associative -- name: io.k8s.api.flowcontrol.v1.ServiceAccountSubject +- name: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject map: fields: - name: name @@ -9189,22 +9175,22 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.flowcontrol.v1.Subject +- name: io.k8s.api.flowcontrol.v1alpha1.Subject map: fields: - name: group type: - namedType: io.k8s.api.flowcontrol.v1.GroupSubject + namedType: io.k8s.api.flowcontrol.v1alpha1.GroupSubject - name: kind type: scalar: string default: "" - name: serviceAccount type: - namedType: io.k8s.api.flowcontrol.v1.ServiceAccountSubject + namedType: io.k8s.api.flowcontrol.v1alpha1.ServiceAccountSubject - name: user type: - namedType: io.k8s.api.flowcontrol.v1.UserSubject + namedType: io.k8s.api.flowcontrol.v1alpha1.UserSubject unions: - discriminator: kind fields: @@ -9214,7 +9200,7 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: ServiceAccount - fieldName: user discriminatorValue: User -- name: io.k8s.api.flowcontrol.v1.UserSubject +- name: io.k8s.api.flowcontrol.v1alpha1.UserSubject map: fields: - name: name @@ -9264,6 +9250,7 @@ var schemaYAML = typed.YAMLObject(`types: 
- name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -9409,6 +9396,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -9590,6 +9578,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -9735,6 +9724,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -9916,6 +9906,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -10061,6 +10052,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -10572,7 +10564,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric -- name: io.k8s.api.networking.v1alpha1.IPAddress +- name: io.k8s.api.networking.v1alpha1.ClusterCIDR map: fields: - name: apiVersion @@ -10587,30 +10579,27 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec + namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec default: {} -- name: io.k8s.api.networking.v1alpha1.IPAddressSpec +- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec map: fields: - - name: parentRef - type: - namedType: io.k8s.api.networking.v1alpha1.ParentReference -- name: io.k8s.api.networking.v1alpha1.ParentReference - map: - fields: - - name: group + - name: ipv4 type: scalar: string - - name: name + default: "" + - name: ipv6 type: scalar: string - - name: namespace + default: "" + - name: nodeSelector type: - scalar: string - - name: resource + namedType: io.k8s.api.core.v1.NodeSelector + - name: perNodeHostBits type: - scalar: string -- name: io.k8s.api.networking.v1alpha1.ServiceCIDR + scalar: numeric + default: 0 +- name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: - name: apiVersion @@ -10625,32 +10614,32 @@ var schemaYAML = typed.YAMLObject(`types: default: {} - name: spec type: - namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec - default: {} - - name: status - type: - namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + namedType: io.k8s.api.networking.v1alpha1.IPAddressSpec default: {} -- name: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec +- name: io.k8s.api.networking.v1alpha1.IPAddressSpec map: fields: - - name: cidrs + - name: parentRef type: - list: - elementType: - scalar: string - elementRelationship: atomic -- name: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus + namedType: io.k8s.api.networking.v1alpha1.ParentReference +- name: io.k8s.api.networking.v1alpha1.ParentReference map: fields: - - name: conditions + - name: group type: - list: - elementType: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition - elementRelationship: associative - keys: - - type + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: uid + type: + scalar: string - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: @@ 
-10706,6 +10695,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: servicePort type: namedType: io.k8s.apimachinery.pkg.util.intstr.IntOrString + default: {} - name: io.k8s.api.networking.v1beta1.IngressClass map: fields: @@ -11071,6 +11061,29 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric +- name: io.k8s.api.policy.v1beta1.AllowedCSIDriver + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.policy.v1beta1.AllowedFlexVolume + map: + fields: + - name: driver + type: + scalar: string + default: "" +- name: io.k8s.api.policy.v1beta1.AllowedHostPath + map: + fields: + - name: pathPrefix + type: + scalar: string + - name: readOnly + type: + scalar: boolean - name: io.k8s.api.policy.v1beta1.Eviction map: fields: @@ -11087,6 +11100,40 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta default: {} +- name: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string +- name: io.k8s.api.policy.v1beta1.HostPortRange + map: + fields: + - name: max + type: + scalar: numeric + default: 0 + - name: min + type: + scalar: numeric + default: 0 +- name: io.k8s.api.policy.v1beta1.IDRange + map: + fields: + - name: max + type: + scalar: numeric + default: 0 + - name: min + type: + scalar: numeric + default: 0 - name: io.k8s.api.policy.v1beta1.PodDisruptionBudget map: fields: @@ -11158,6 +11205,195 @@ var schemaYAML = typed.YAMLObject(`types: - name: observedGeneration type: scalar: numeric +- name: io.k8s.api.policy.v1beta1.PodSecurityPolicy + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec + default: {} +- name: io.k8s.api.policy.v1beta1.PodSecurityPolicySpec + map: + fields: + - name: allowPrivilegeEscalation + type: + scalar: boolean + - name: allowedCSIDrivers + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.AllowedCSIDriver + elementRelationship: atomic + - name: allowedCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allowedFlexVolumes + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.AllowedFlexVolume + elementRelationship: atomic + - name: allowedHostPaths + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.AllowedHostPath + elementRelationship: atomic + - name: allowedProcMountTypes + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: allowedUnsafeSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultAddCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultAllowPrivilegeEscalation + type: + scalar: boolean + - name: forbiddenSysctls + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: fsGroup + type: + namedType: io.k8s.api.policy.v1beta1.FSGroupStrategyOptions + default: {} + - name: hostIPC + type: + scalar: boolean + - name: hostNetwork + type: + scalar: boolean + - name: hostPID + type: + scalar: boolean + - name: hostPorts + type: + list: + elementType: + namedType: 
io.k8s.api.policy.v1beta1.HostPortRange + elementRelationship: atomic + - name: privileged + type: + scalar: boolean + - name: readOnlyRootFilesystem + type: + scalar: boolean + - name: requiredDropCapabilities + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: runAsGroup + type: + namedType: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions + - name: runAsUser + type: + namedType: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions + default: {} + - name: runtimeClass + type: + namedType: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions + - name: seLinux + type: + namedType: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions + default: {} + - name: supplementalGroups + type: + namedType: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions + default: {} + - name: volumes + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.policy.v1beta1.RunAsGroupStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string + default: "" +- name: io.k8s.api.policy.v1beta1.RunAsUserStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string + default: "" +- name: io.k8s.api.policy.v1beta1.RuntimeClassStrategyOptions + map: + fields: + - name: allowedRuntimeClassNames + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: defaultRuntimeClassName + type: + scalar: string +- name: io.k8s.api.policy.v1beta1.SELinuxStrategyOptions + map: + fields: + - name: rule + type: + scalar: string + default: "" + - name: seLinuxOptions + type: + namedType: io.k8s.api.core.v1.SELinuxOptions +- name: io.k8s.api.policy.v1beta1.SupplementalGroupsStrategyOptions + map: + fields: + - name: ranges + type: + list: + elementType: + namedType: io.k8s.api.policy.v1beta1.IDRange + elementRelationship: atomic + - name: rule + type: + scalar: string - name: io.k8s.api.rbac.v1.AggregationRule map: fields: @@ -11687,7 +11923,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: atomic + elementRelationship: associative - name: selectedNode type: scalar: string @@ -11766,7 +12002,7 @@ var schemaYAML = typed.YAMLObject(`types: list: elementType: scalar: string - elementRelationship: atomic + elementRelationship: associative - name: io.k8s.api.resource.v1alpha2.ResourceClaimSpec map: fields: @@ -12204,6 +12440,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: io.k8s.api.storage.v1.VolumeNodeResources map: fields: @@ -12299,28 +12536,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: detachError type: namedType: io.k8s.api.storage.v1alpha1.VolumeError -- name: io.k8s.api.storage.v1alpha1.VolumeAttributesClass - map: - fields: - - name: apiVersion - type: - scalar: string - - name: driverName - type: - scalar: string - default: "" - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: parameters - type: - map: - elementType: - scalar: string - name: io.k8s.api.storage.v1alpha1.VolumeError map: fields: @@ -12330,6 +12545,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: io.k8s.api.storage.v1beta1.CSIDriver map: fields: @@ -12579,6 +12795,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: time type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: io.k8s.api.storage.v1beta1.VolumeNodeResources map: fields: @@ -12593,6 +12810,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: lastTransitionTime type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: message type: scalar: string @@ -12720,6 +12938,7 @@ var schemaYAML = typed.YAMLObject(`types: - name: creationTimestamp type: namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: deletionGracePeriodSeconds type: scalar: numeric diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go index a206bd326ae..8a58d9e8705 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go +++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/unstructured.go @@ -125,7 +125,7 @@ func (e *extractor) extractUnstructured(object *unstructured.Unstructured, field return nil, fmt.Errorf("failed to fetch the objectType: %v", err) } result := &unstructured.Unstructured{} - err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) //nolint:forbidigo + err = managedfields.ExtractInto(object, *objectType, fieldManager, result, subresource) if err != nil { return nil, fmt.Errorf("failed calling ExtractInto for unstructured: %v", err) } diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go similarity index 68% rename from vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go rename to vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go index f6d0a91e009..ad0eae9198e 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go @@ -27,56 +27,55 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use +// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use // with apply. -type ServiceCIDRApplyConfiguration struct { +type ClusterCIDRApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"` - Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"` + Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` } -// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with +// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with // apply. -func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration { - b := &ServiceCIDRApplyConfiguration{} +func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { + b := &ClusterCIDRApplyConfiguration{} b.WithName(name) - b.WithKind("ServiceCIDR") + b.WithKind("ClusterCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b } -// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from -// serviceCIDR. 
If no managedFields are found in serviceCIDR for fieldManager, a -// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from +// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a +// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API. -// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. +// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { - return extractServiceCIDR(serviceCIDR, fieldManager, "") +func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "") } -// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except +// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except // that it extracts the status subresource applied configuration. // Experimental! -func ExtractServiceCIDRStatus(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) { - return extractServiceCIDR(serviceCIDR, fieldManager, "status") +func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "status") } -func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) { - b := &ServiceCIDRApplyConfiguration{} - err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ServiceCIDR"), fieldManager, b, subresource) +func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { + b := &ClusterCIDRApplyConfiguration{} + err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(serviceCIDR.Name) + b.WithName(clusterCIDR.Name) - b.WithKind("ServiceCIDR") + b.WithKind("ClusterCIDR") b.WithAPIVersion("networking.k8s.io/v1alpha1") return b, nil } @@ -84,7 +83,7 @@ func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManage // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. 
// If called multiple times, the Kind field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { b.Kind = &value return b } @@ -92,7 +91,7 @@ func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApply // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { b.APIVersion = &value return b } @@ -100,7 +99,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCID // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -109,7 +108,7 @@ func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApply // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -118,7 +117,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceC // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -127,7 +126,7 @@ func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDR // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. 
-func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -136,7 +135,7 @@ func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApp // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -145,7 +144,7 @@ func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *Servi // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -154,7 +153,7 @@ func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDR // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -163,7 +162,7 @@ func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -172,7 +171,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. 
-func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -182,7 +181,7 @@ func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. -func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -197,7 +196,7 @@ func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *S // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -211,7 +210,7 @@ func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]strin // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -225,7 +224,7 @@ func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerR // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -233,7 +232,7 @@ func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *Servic return b } -func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } @@ -242,15 +241,7 @@ func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists // WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Spec field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration { +func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { b.Spec = value return b } - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration { - b.Status = value - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go new file mode 100644 index 00000000000..8d5fa406b09 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use +// with apply. +type ClusterCIDRSpecApplyConfiguration struct { + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` + IPv4 *string `json:"ipv4,omitempty"` + IPv6 *string `json:"ipv6,omitempty"` +} + +// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with +// apply. 
+func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { + return &ClusterCIDRSpecApplyConfiguration{} +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PerNodeHostBits field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { + b.PerNodeHostBits = &value + return b +} + +// WithIPv4 sets the IPv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv4 field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv4 = &value + return b +} + +// WithIPv6 sets the IPv6 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6 field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv6 = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go index ce1049709a0..14b10b19ff6 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go +++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/parentreference.go @@ -18,13 +18,18 @@ limitations under the License. package v1alpha1 +import ( + types "k8s.io/apimachinery/pkg/types" +) + // ParentReferenceApplyConfiguration represents an declarative configuration of the ParentReference type for use // with apply. type ParentReferenceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Resource *string `json:"resource,omitempty"` - Namespace *string `json:"namespace,omitempty"` - Name *string `json:"name,omitempty"` + Group *string `json:"group,omitempty"` + Resource *string `json:"resource,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + UID *types.UID `json:"uid,omitempty"` } // ParentReferenceApplyConfiguration constructs an declarative configuration of the ParentReference type for use with @@ -64,3 +69,11 @@ func (b *ParentReferenceApplyConfiguration) WithName(value string) *ParentRefere b.Name = &value return b } + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *ParentReferenceApplyConfiguration) WithUID(value types.UID) *ParentReferenceApplyConfiguration { + b.UID = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go deleted file mode 100644 index 302d69194ce..00000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrspec.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use -// with apply. -type ServiceCIDRSpecApplyConfiguration struct { - CIDRs []string `json:"cidrs,omitempty"` -} - -// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with -// apply. -func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration { - return &ServiceCIDRSpecApplyConfiguration{} -} - -// WithCIDRs adds the given value to the CIDRs field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the CIDRs field. -func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration { - for i := range values { - b.CIDRs = append(b.CIDRs, values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go deleted file mode 100644 index 5afc549a650..00000000000 --- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1alpha1/servicecidrstatus.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use -// with apply. 
-type ServiceCIDRStatusApplyConfiguration struct { - Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"` -} - -// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with -// apply. -func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration { - return &ServiceCIDRStatusApplyConfiguration{} -} - -// WithConditions adds the given value to the Conditions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithConditions") - } - b.Conditions = append(b.Conditions, *values[i]) - } - return b -} diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go similarity index 51% rename from vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go rename to vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go index 8b3284536ad..27b49bf1538 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedcsidriver.go @@ -16,24 +16,24 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1 +package v1beta1 -// SleepActionApplyConfiguration represents an declarative configuration of the SleepAction type for use +// AllowedCSIDriverApplyConfiguration represents an declarative configuration of the AllowedCSIDriver type for use // with apply. -type SleepActionApplyConfiguration struct { - Seconds *int64 `json:"seconds,omitempty"` +type AllowedCSIDriverApplyConfiguration struct { + Name *string `json:"name,omitempty"` } -// SleepActionApplyConfiguration constructs an declarative configuration of the SleepAction type for use with +// AllowedCSIDriverApplyConfiguration constructs an declarative configuration of the AllowedCSIDriver type for use with // apply. -func SleepAction() *SleepActionApplyConfiguration { - return &SleepActionApplyConfiguration{} +func AllowedCSIDriver() *AllowedCSIDriverApplyConfiguration { + return &AllowedCSIDriverApplyConfiguration{} } -// WithSeconds sets the Seconds field in the declarative configuration to the given value +// WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Seconds field is set to the value of the last call. -func (b *SleepActionApplyConfiguration) WithSeconds(value int64) *SleepActionApplyConfiguration { - b.Seconds = &value +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *AllowedCSIDriverApplyConfiguration) WithName(value string) *AllowedCSIDriverApplyConfiguration { + b.Name = &value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go new file mode 100644 index 00000000000..30c3724cfee --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedflexvolume.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AllowedFlexVolumeApplyConfiguration represents an declarative configuration of the AllowedFlexVolume type for use +// with apply. +type AllowedFlexVolumeApplyConfiguration struct { + Driver *string `json:"driver,omitempty"` +} + +// AllowedFlexVolumeApplyConfiguration constructs an declarative configuration of the AllowedFlexVolume type for use with +// apply. +func AllowedFlexVolume() *AllowedFlexVolumeApplyConfiguration { + return &AllowedFlexVolumeApplyConfiguration{} +} + +// WithDriver sets the Driver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Driver field is set to the value of the last call. +func (b *AllowedFlexVolumeApplyConfiguration) WithDriver(value string) *AllowedFlexVolumeApplyConfiguration { + b.Driver = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go new file mode 100644 index 00000000000..493815d8d4a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/allowedhostpath.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// AllowedHostPathApplyConfiguration represents an declarative configuration of the AllowedHostPath type for use +// with apply. +type AllowedHostPathApplyConfiguration struct { + PathPrefix *string `json:"pathPrefix,omitempty"` + ReadOnly *bool `json:"readOnly,omitempty"` +} + +// AllowedHostPathApplyConfiguration constructs an declarative configuration of the AllowedHostPath type for use with +// apply. 
+func AllowedHostPath() *AllowedHostPathApplyConfiguration { + return &AllowedHostPathApplyConfiguration{} +} + +// WithPathPrefix sets the PathPrefix field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PathPrefix field is set to the value of the last call. +func (b *AllowedHostPathApplyConfiguration) WithPathPrefix(value string) *AllowedHostPathApplyConfiguration { + b.PathPrefix = &value + return b +} + +// WithReadOnly sets the ReadOnly field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadOnly field is set to the value of the last call. +func (b *AllowedHostPathApplyConfiguration) WithReadOnly(value bool) *AllowedHostPathApplyConfiguration { + b.ReadOnly = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go new file mode 100644 index 00000000000..06803b439df --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/fsgroupstrategyoptions.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" +) + +// FSGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the FSGroupStrategyOptions type for use +// with apply. +type FSGroupStrategyOptionsApplyConfiguration struct { + Rule *v1beta1.FSGroupStrategyType `json:"rule,omitempty"` + Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` +} + +// FSGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the FSGroupStrategyOptions type for use with +// apply. +func FSGroupStrategyOptions() *FSGroupStrategyOptionsApplyConfiguration { + return &FSGroupStrategyOptionsApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *FSGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.FSGroupStrategyType) *FSGroupStrategyOptionsApplyConfiguration { + b.Rule = &value + return b +} + +// WithRanges adds the given value to the Ranges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ranges field. 
+func (b *FSGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *FSGroupStrategyOptionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRanges") + } + b.Ranges = append(b.Ranges, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go new file mode 100644 index 00000000000..7c796881393 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/hostportrange.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// HostPortRangeApplyConfiguration represents an declarative configuration of the HostPortRange type for use +// with apply. +type HostPortRangeApplyConfiguration struct { + Min *int32 `json:"min,omitempty"` + Max *int32 `json:"max,omitempty"` +} + +// HostPortRangeApplyConfiguration constructs an declarative configuration of the HostPortRange type for use with +// apply. +func HostPortRange() *HostPortRangeApplyConfiguration { + return &HostPortRangeApplyConfiguration{} +} + +// WithMin sets the Min field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Min field is set to the value of the last call. +func (b *HostPortRangeApplyConfiguration) WithMin(value int32) *HostPortRangeApplyConfiguration { + b.Min = &value + return b +} + +// WithMax sets the Max field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Max field is set to the value of the last call. +func (b *HostPortRangeApplyConfiguration) WithMax(value int32) *HostPortRangeApplyConfiguration { + b.Max = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go new file mode 100644 index 00000000000..af46f76581a --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/idrange.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +// IDRangeApplyConfiguration represents an declarative configuration of the IDRange type for use +// with apply. +type IDRangeApplyConfiguration struct { + Min *int64 `json:"min,omitempty"` + Max *int64 `json:"max,omitempty"` +} + +// IDRangeApplyConfiguration constructs an declarative configuration of the IDRange type for use with +// apply. +func IDRange() *IDRangeApplyConfiguration { + return &IDRangeApplyConfiguration{} +} + +// WithMin sets the Min field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Min field is set to the value of the last call. +func (b *IDRangeApplyConfiguration) WithMin(value int64) *IDRangeApplyConfiguration { + b.Min = &value + return b +} + +// WithMax sets the Max field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Max field is set to the value of the last call. +func (b *IDRangeApplyConfiguration) WithMax(value int64) *IDRangeApplyConfiguration { + b.Max = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go similarity index 59% rename from vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go rename to vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go index 9d4c476259e..46cfc4de1ec 100644 --- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by applyconfiguration-gen. DO NOT EDIT. -package v1alpha1 +package v1beta1 import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" + policyv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" managedfields "k8s.io/apimachinery/pkg/util/managedfields" @@ -27,64 +27,63 @@ import ( v1 "k8s.io/client-go/applyconfigurations/meta/v1" ) -// VolumeAttributesClassApplyConfiguration represents an declarative configuration of the VolumeAttributesClass type for use +// PodSecurityPolicyApplyConfiguration represents an declarative configuration of the PodSecurityPolicy type for use // with apply. -type VolumeAttributesClassApplyConfiguration struct { +type PodSecurityPolicyApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - DriverName *string `json:"driverName,omitempty"` - Parameters map[string]string `json:"parameters,omitempty"` + Spec *PodSecurityPolicySpecApplyConfiguration `json:"spec,omitempty"` } -// VolumeAttributesClass constructs an declarative configuration of the VolumeAttributesClass type for use with +// PodSecurityPolicy constructs an declarative configuration of the PodSecurityPolicy type for use with // apply. 
-func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration { - b := &VolumeAttributesClassApplyConfiguration{} +func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration { + b := &PodSecurityPolicyApplyConfiguration{} b.WithName(name) - b.WithKind("VolumeAttributesClass") - b.WithAPIVersion("storage.k8s.io/v1alpha1") + b.WithKind("PodSecurityPolicy") + b.WithAPIVersion("policy/v1beta1") return b } -// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from -// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a -// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable), +// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from +// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a +// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable), // APIVersion and Kind populated. It is possible that no managed fields were found for because other // field managers have taken ownership of all the fields previously owned by fieldManager, or because // the fieldManager never owned fields any fields. -// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API. -// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow. +// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API. +// ExtractPodSecurityPolicy provides a way to perform a extract/modify-in-place/apply workflow. // Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously // applied if another fieldManager has updated or force applied any of the previously applied fields. // Experimental! -func ExtractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { - return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "") +func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "") } -// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except +// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except // that it extracts the status subresource applied configuration. // Experimental! 
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) { - return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status") +func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) { + return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status") } -func extractVolumeAttributesClass(volumeAttributesClass *v1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) { - b := &VolumeAttributesClassApplyConfiguration{} - err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource) +func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) { + b := &PodSecurityPolicyApplyConfiguration{} + err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource) if err != nil { return nil, err } - b.WithName(volumeAttributesClass.Name) + b.WithName(podSecurityPolicy.Name) - b.WithKind("VolumeAttributesClass") - b.WithAPIVersion("storage.k8s.io/v1alpha1") + b.WithKind("PodSecurityPolicy") + b.WithAPIVersion("policy/v1beta1") return b, nil } // WithKind sets the Kind field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Kind field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithKind(value string) *PodSecurityPolicyApplyConfiguration { b.Kind = &value return b } @@ -92,7 +91,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithKind(value string) *Volume // WithAPIVersion sets the APIVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the APIVersion field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithAPIVersion(value string) *PodSecurityPolicyApplyConfiguration { b.APIVersion = &value return b } @@ -100,7 +99,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAPIVersion(value string) * // WithName sets the Name field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Name field is set to the value of the last call. 
-func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithName(value string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Name = &value return b @@ -109,7 +108,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithName(value string) *Volume // WithGenerateName sets the GenerateName field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the GenerateName field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithGenerateName(value string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.GenerateName = &value return b @@ -118,7 +117,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGenerateName(value string) // WithNamespace sets the Namespace field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Namespace field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithNamespace(value string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Namespace = &value return b @@ -127,7 +126,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithNamespace(value string) *V // WithUID sets the UID field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the UID field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithUID(value types.UID) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.UID = &value return b @@ -136,7 +135,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithUID(value types.UID) *Volu // WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithResourceVersion(value string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.ResourceVersion = &value return b @@ -145,7 +144,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithResourceVersion(value stri // WithGeneration sets the Generation field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Generation field is set to the value of the last call. 
-func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithGeneration(value int64) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.Generation = &value return b @@ -154,7 +153,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithGeneration(value int64) *V // WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.CreationTimestamp = &value return b @@ -163,7 +162,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithCreationTimestamp(value me // WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionTimestamp = &value return b @@ -172,7 +171,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionTimestamp(value me // WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() b.DeletionGracePeriodSeconds = &value return b @@ -182,7 +181,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithDeletionGracePeriodSeconds // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Labels field, // overwriting an existing map entries in Labels field with the same key. 
-func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string]string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithLabels(entries map[string]string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Labels == nil && len(entries) > 0 { b.Labels = make(map[string]string, len(entries)) @@ -197,7 +196,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithLabels(entries map[string] // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, the entries provided by each call will be put on the Annotations field, // overwriting an existing map entries in Annotations field with the same key. -func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[string]string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithAnnotations(entries map[string]string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() if b.Annotations == nil && len(entries) > 0 { b.Annotations = make(map[string]string, len(entries)) @@ -211,7 +210,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithAnnotations(entries map[st // WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { if values[i] == nil { @@ -225,7 +224,7 @@ func (b *VolumeAttributesClassApplyConfiguration) WithOwnerReferences(values ... // WithFinalizers adds the given value to the Finalizers field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...string) *VolumeAttributesClassApplyConfiguration { +func (b *PodSecurityPolicyApplyConfiguration) WithFinalizers(values ...string) *PodSecurityPolicyApplyConfiguration { b.ensureObjectMetaApplyConfigurationExists() for i := range values { b.Finalizers = append(b.Finalizers, values[i]) @@ -233,30 +232,16 @@ func (b *VolumeAttributesClassApplyConfiguration) WithFinalizers(values ...strin return b } -func (b *VolumeAttributesClassApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { +func (b *PodSecurityPolicyApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { if b.ObjectMetaApplyConfiguration == nil { b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} } } -// WithDriverName sets the DriverName field in the declarative configuration to the given value +// WithSpec sets the Spec field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DriverName field is set to the value of the last call. 
-func (b *VolumeAttributesClassApplyConfiguration) WithDriverName(value string) *VolumeAttributesClassApplyConfiguration { - b.DriverName = &value - return b -} - -// WithParameters puts the entries into the Parameters field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Parameters field, -// overwriting an existing map entries in Parameters field with the same key. -func (b *VolumeAttributesClassApplyConfiguration) WithParameters(entries map[string]string) *VolumeAttributesClassApplyConfiguration { - if b.Parameters == nil && len(entries) > 0 { - b.Parameters = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Parameters[k] = v - } +// If called multiple times, the Spec field is set to the value of the last call. +func (b *PodSecurityPolicyApplyConfiguration) WithSpec(value *PodSecurityPolicySpecApplyConfiguration) *PodSecurityPolicyApplyConfiguration { + b.Spec = value return b } diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go new file mode 100644 index 00000000000..bf951cf56b6 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicyspec.go @@ -0,0 +1,285 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/policy/v1beta1" +) + +// PodSecurityPolicySpecApplyConfiguration represents an declarative configuration of the PodSecurityPolicySpec type for use +// with apply. 
+type PodSecurityPolicySpecApplyConfiguration struct { + Privileged *bool `json:"privileged,omitempty"` + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty"` + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty"` + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty"` + Volumes []v1beta1.FSType `json:"volumes,omitempty"` + HostNetwork *bool `json:"hostNetwork,omitempty"` + HostPorts []HostPortRangeApplyConfiguration `json:"hostPorts,omitempty"` + HostPID *bool `json:"hostPID,omitempty"` + HostIPC *bool `json:"hostIPC,omitempty"` + SELinux *SELinuxStrategyOptionsApplyConfiguration `json:"seLinux,omitempty"` + RunAsUser *RunAsUserStrategyOptionsApplyConfiguration `json:"runAsUser,omitempty"` + RunAsGroup *RunAsGroupStrategyOptionsApplyConfiguration `json:"runAsGroup,omitempty"` + SupplementalGroups *SupplementalGroupsStrategyOptionsApplyConfiguration `json:"supplementalGroups,omitempty"` + FSGroup *FSGroupStrategyOptionsApplyConfiguration `json:"fsGroup,omitempty"` + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty"` + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"` + AllowedHostPaths []AllowedHostPathApplyConfiguration `json:"allowedHostPaths,omitempty"` + AllowedFlexVolumes []AllowedFlexVolumeApplyConfiguration `json:"allowedFlexVolumes,omitempty"` + AllowedCSIDrivers []AllowedCSIDriverApplyConfiguration `json:"allowedCSIDrivers,omitempty"` + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"` + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty"` + AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty"` + RuntimeClass *RuntimeClassStrategyOptionsApplyConfiguration `json:"runtimeClass,omitempty"` +} + +// PodSecurityPolicySpecApplyConfiguration constructs an declarative configuration of the PodSecurityPolicySpec type for use with +// apply. +func PodSecurityPolicySpec() *PodSecurityPolicySpecApplyConfiguration { + return &PodSecurityPolicySpecApplyConfiguration{} +} + +// WithPrivileged sets the Privileged field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Privileged field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithPrivileged(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.Privileged = &value + return b +} + +// WithDefaultAddCapabilities adds the given value to the DefaultAddCapabilities field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DefaultAddCapabilities field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAddCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.DefaultAddCapabilities = append(b.DefaultAddCapabilities, values[i]) + } + return b +} + +// WithRequiredDropCapabilities adds the given value to the RequiredDropCapabilities field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the RequiredDropCapabilities field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithRequiredDropCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.RequiredDropCapabilities = append(b.RequiredDropCapabilities, values[i]) + } + return b +} + +// WithAllowedCapabilities adds the given value to the AllowedCapabilities field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedCapabilities field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCapabilities(values ...v1.Capability) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.AllowedCapabilities = append(b.AllowedCapabilities, values[i]) + } + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithVolumes(values ...v1beta1.FSType) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.Volumes = append(b.Volumes, values[i]) + } + return b +} + +// WithHostNetwork sets the HostNetwork field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostNetwork field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithHostNetwork(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.HostNetwork = &value + return b +} + +// WithHostPorts adds the given value to the HostPorts field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the HostPorts field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPorts(values ...*HostPortRangeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithHostPorts") + } + b.HostPorts = append(b.HostPorts, *values[i]) + } + return b +} + +// WithHostPID sets the HostPID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostPID field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithHostPID(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.HostPID = &value + return b +} + +// WithHostIPC sets the HostIPC field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HostIPC field is set to the value of the last call. 
+func (b *PodSecurityPolicySpecApplyConfiguration) WithHostIPC(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.HostIPC = &value + return b +} + +// WithSELinux sets the SELinux field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinux field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithSELinux(value *SELinuxStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.SELinux = value + return b +} + +// WithRunAsUser sets the RunAsUser field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RunAsUser field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsUser(value *RunAsUserStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.RunAsUser = value + return b +} + +// WithRunAsGroup sets the RunAsGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RunAsGroup field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithRunAsGroup(value *RunAsGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.RunAsGroup = value + return b +} + +// WithSupplementalGroups sets the SupplementalGroups field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SupplementalGroups field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithSupplementalGroups(value *SupplementalGroupsStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.SupplementalGroups = value + return b +} + +// WithFSGroup sets the FSGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FSGroup field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithFSGroup(value *FSGroupStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.FSGroup = value + return b +} + +// WithReadOnlyRootFilesystem sets the ReadOnlyRootFilesystem field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadOnlyRootFilesystem field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithReadOnlyRootFilesystem(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.ReadOnlyRootFilesystem = &value + return b +} + +// WithDefaultAllowPrivilegeEscalation sets the DefaultAllowPrivilegeEscalation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultAllowPrivilegeEscalation field is set to the value of the last call. 
+func (b *PodSecurityPolicySpecApplyConfiguration) WithDefaultAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.DefaultAllowPrivilegeEscalation = &value + return b +} + +// WithAllowPrivilegeEscalation sets the AllowPrivilegeEscalation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowPrivilegeEscalation field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowPrivilegeEscalation(value bool) *PodSecurityPolicySpecApplyConfiguration { + b.AllowPrivilegeEscalation = &value + return b +} + +// WithAllowedHostPaths adds the given value to the AllowedHostPaths field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedHostPaths field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedHostPaths(values ...*AllowedHostPathApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllowedHostPaths") + } + b.AllowedHostPaths = append(b.AllowedHostPaths, *values[i]) + } + return b +} + +// WithAllowedFlexVolumes adds the given value to the AllowedFlexVolumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedFlexVolumes field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedFlexVolumes(values ...*AllowedFlexVolumeApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllowedFlexVolumes") + } + b.AllowedFlexVolumes = append(b.AllowedFlexVolumes, *values[i]) + } + return b +} + +// WithAllowedCSIDrivers adds the given value to the AllowedCSIDrivers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedCSIDrivers field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedCSIDrivers(values ...*AllowedCSIDriverApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithAllowedCSIDrivers") + } + b.AllowedCSIDrivers = append(b.AllowedCSIDrivers, *values[i]) + } + return b +} + +// WithAllowedUnsafeSysctls adds the given value to the AllowedUnsafeSysctls field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedUnsafeSysctls field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedUnsafeSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.AllowedUnsafeSysctls = append(b.AllowedUnsafeSysctls, values[i]) + } + return b +} + +// WithForbiddenSysctls adds the given value to the ForbiddenSysctls field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, values provided by each call will be appended to the ForbiddenSysctls field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithForbiddenSysctls(values ...string) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.ForbiddenSysctls = append(b.ForbiddenSysctls, values[i]) + } + return b +} + +// WithAllowedProcMountTypes adds the given value to the AllowedProcMountTypes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedProcMountTypes field. +func (b *PodSecurityPolicySpecApplyConfiguration) WithAllowedProcMountTypes(values ...v1.ProcMountType) *PodSecurityPolicySpecApplyConfiguration { + for i := range values { + b.AllowedProcMountTypes = append(b.AllowedProcMountTypes, values[i]) + } + return b +} + +// WithRuntimeClass sets the RuntimeClass field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RuntimeClass field is set to the value of the last call. +func (b *PodSecurityPolicySpecApplyConfiguration) WithRuntimeClass(value *RuntimeClassStrategyOptionsApplyConfiguration) *PodSecurityPolicySpecApplyConfiguration { + b.RuntimeClass = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go new file mode 100644 index 00000000000..fcfcfbe6b92 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasgroupstrategyoptions.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" +) + +// RunAsGroupStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsGroupStrategyOptions type for use +// with apply. +type RunAsGroupStrategyOptionsApplyConfiguration struct { + Rule *v1beta1.RunAsGroupStrategy `json:"rule,omitempty"` + Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` +} + +// RunAsGroupStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsGroupStrategyOptions type for use with +// apply. +func RunAsGroupStrategyOptions() *RunAsGroupStrategyOptionsApplyConfiguration { + return &RunAsGroupStrategyOptionsApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. 
+func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsGroupStrategy) *RunAsGroupStrategyOptionsApplyConfiguration { + b.Rule = &value + return b +} + +// WithRanges adds the given value to the Ranges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ranges field. +func (b *RunAsGroupStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsGroupStrategyOptionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRanges") + } + b.Ranges = append(b.Ranges, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go new file mode 100644 index 00000000000..a6d6ee58e37 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runasuserstrategyoptions.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" +) + +// RunAsUserStrategyOptionsApplyConfiguration represents an declarative configuration of the RunAsUserStrategyOptions type for use +// with apply. +type RunAsUserStrategyOptionsApplyConfiguration struct { + Rule *v1beta1.RunAsUserStrategy `json:"rule,omitempty"` + Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` +} + +// RunAsUserStrategyOptionsApplyConfiguration constructs an declarative configuration of the RunAsUserStrategyOptions type for use with +// apply. +func RunAsUserStrategyOptions() *RunAsUserStrategyOptionsApplyConfiguration { + return &RunAsUserStrategyOptionsApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRule(value v1beta1.RunAsUserStrategy) *RunAsUserStrategyOptionsApplyConfiguration { + b.Rule = &value + return b +} + +// WithRanges adds the given value to the Ranges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ranges field. 
+func (b *RunAsUserStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *RunAsUserStrategyOptionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRanges") + } + b.Ranges = append(b.Ranges, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go new file mode 100644 index 00000000000..c19a7ce6175 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/runtimeclassstrategyoptions.go @@ -0,0 +1,50 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +// RuntimeClassStrategyOptionsApplyConfiguration represents an declarative configuration of the RuntimeClassStrategyOptions type for use +// with apply. +type RuntimeClassStrategyOptionsApplyConfiguration struct { + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames,omitempty"` + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty"` +} + +// RuntimeClassStrategyOptionsApplyConfiguration constructs an declarative configuration of the RuntimeClassStrategyOptions type for use with +// apply. +func RuntimeClassStrategyOptions() *RuntimeClassStrategyOptionsApplyConfiguration { + return &RuntimeClassStrategyOptionsApplyConfiguration{} +} + +// WithAllowedRuntimeClassNames adds the given value to the AllowedRuntimeClassNames field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedRuntimeClassNames field. +func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithAllowedRuntimeClassNames(values ...string) *RuntimeClassStrategyOptionsApplyConfiguration { + for i := range values { + b.AllowedRuntimeClassNames = append(b.AllowedRuntimeClassNames, values[i]) + } + return b +} + +// WithDefaultRuntimeClassName sets the DefaultRuntimeClassName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DefaultRuntimeClassName field is set to the value of the last call. +func (b *RuntimeClassStrategyOptionsApplyConfiguration) WithDefaultRuntimeClassName(value string) *RuntimeClassStrategyOptionsApplyConfiguration { + b.DefaultRuntimeClassName = &value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go new file mode 100644 index 00000000000..de7ede618e9 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/selinuxstrategyoptions.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// SELinuxStrategyOptionsApplyConfiguration represents an declarative configuration of the SELinuxStrategyOptions type for use +// with apply. +type SELinuxStrategyOptionsApplyConfiguration struct { + Rule *v1beta1.SELinuxStrategy `json:"rule,omitempty"` + SELinuxOptions *v1.SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"` +} + +// SELinuxStrategyOptionsApplyConfiguration constructs an declarative configuration of the SELinuxStrategyOptions type for use with +// apply. +func SELinuxStrategyOptions() *SELinuxStrategyOptionsApplyConfiguration { + return &SELinuxStrategyOptionsApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *SELinuxStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SELinuxStrategy) *SELinuxStrategyOptionsApplyConfiguration { + b.Rule = &value + return b +} + +// WithSELinuxOptions sets the SELinuxOptions field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SELinuxOptions field is set to the value of the last call. +func (b *SELinuxStrategyOptionsApplyConfiguration) WithSELinuxOptions(value *v1.SELinuxOptionsApplyConfiguration) *SELinuxStrategyOptionsApplyConfiguration { + b.SELinuxOptions = value + return b +} diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go new file mode 100644 index 00000000000..9e4a9bb2ca3 --- /dev/null +++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/supplementalgroupsstrategyoptions.go @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" +) + +// SupplementalGroupsStrategyOptionsApplyConfiguration represents an declarative configuration of the SupplementalGroupsStrategyOptions type for use +// with apply. +type SupplementalGroupsStrategyOptionsApplyConfiguration struct { + Rule *v1beta1.SupplementalGroupsStrategyType `json:"rule,omitempty"` + Ranges []IDRangeApplyConfiguration `json:"ranges,omitempty"` +} + +// SupplementalGroupsStrategyOptionsApplyConfiguration constructs an declarative configuration of the SupplementalGroupsStrategyOptions type for use with +// apply. +func SupplementalGroupsStrategyOptions() *SupplementalGroupsStrategyOptionsApplyConfiguration { + return &SupplementalGroupsStrategyOptionsApplyConfiguration{} +} + +// WithRule sets the Rule field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Rule field is set to the value of the last call. +func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRule(value v1beta1.SupplementalGroupsStrategyType) *SupplementalGroupsStrategyOptionsApplyConfiguration { + b.Rule = &value + return b +} + +// WithRanges adds the given value to the Ranges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Ranges field. +func (b *SupplementalGroupsStrategyOptionsApplyConfiguration) WithRanges(values ...*IDRangeApplyConfiguration) *SupplementalGroupsStrategyOptionsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithRanges") + } + b.Ranges = append(b.Ranges, *values[i]) + } + return b +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index df0e0f9974e..102ce49bf59 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -19,7 +19,6 @@ package discovery import ( "context" "encoding/json" - goerrors "errors" "fmt" "mime" "net/http" @@ -420,12 +419,6 @@ func (e *ErrGroupDiscoveryFailed) Error() string { return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) } -// Is makes it possible for the callers to use `errors.Is(` helper on errors wrapped with ErrGroupDiscoveryFailed error. -func (e *ErrGroupDiscoveryFailed) Is(target error) bool { - _, ok := target.(*ErrGroupDiscoveryFailed) - return ok -} - // IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover // a complete list of APIs for the client to use. func IsGroupDiscoveryFailedError(err error) bool { @@ -433,16 +426,6 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// GroupDiscoveryFailedErrorGroups returns true if the error is an ErrGroupDiscoveryFailed error, -// along with the map of group versions that failed discovery. 
-func GroupDiscoveryFailedErrorGroups(err error) (map[schema.GroupVersion]error, bool) { - var groupDiscoveryError *ErrGroupDiscoveryFailed - if err != nil && goerrors.As(err, &groupDiscoveryError) { - return groupDiscoveryError.Groups, true - } - return nil, false -} - func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { var sgs *metav1.APIGroupList var resources []*metav1.APIResourceList @@ -654,7 +637,16 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", openAPIV2mimePb).Do(context.TODO()).Raw() if err != nil { - return nil, err + if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { + // single endpoint not found/registered in old server, try to fetch old endpoint + // TODO: remove this when kubectl/client-go don't work with 1.9 server + data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw() + if err != nil { + return nil, err + } + } else { + return nil, err + } } document := &openapi_v2.Document{} err = proto.Unmarshal(data, document) diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go index 9fc86441a19..7dd0ae6353c 100644 --- a/vendor/k8s.io/client-go/informers/factory.go +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -60,7 +60,6 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration - transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -99,14 +98,6 @@ func WithNamespace(namespace string) SharedInformerOption { } } -// WithTransform sets a transform on all informers. -func WithTransform(transform cache.TransformFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.transform = transform - return factory - } -} - // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -211,7 +202,6 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) - informer.SetTransform(f.transform) f.informers[informerType] = informer return informer diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go index 48dd9a8a117..1d3ca09efc9 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/interface.go @@ -19,7 +19,7 @@ limitations under the License. package flowcontrol import ( - v1 "k8s.io/client-go/informers/flowcontrol/v1" + v1alpha1 "k8s.io/client-go/informers/flowcontrol/v1alpha1" v1beta1 "k8s.io/client-go/informers/flowcontrol/v1beta1" v1beta2 "k8s.io/client-go/informers/flowcontrol/v1beta2" v1beta3 "k8s.io/client-go/informers/flowcontrol/v1beta3" @@ -28,8 +28,8 @@ import ( // Interface provides access to each of this group's versions. type Interface interface { - // V1 provides access to shared informers for resources in V1. 
- V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface // V1beta2 provides access to shared informers for resources in V1beta2. @@ -49,9 +49,9 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// V1 returns a new v1.Interface. -func (g *group) V1() v1.Interface { - return v1.New(g.factory, g.namespace, g.tweakListOptions) +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } // V1beta1 returns a new v1beta1.Interface. diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go similarity index 78% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go index 30c41b189b8..9a4a904481d 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/flowschema.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // FlowSchemas. 
type FlowSchemaInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.FlowSchemaLister + Lister() v1alpha1.FlowSchemaLister } type flowSchemaInformer struct { @@ -57,20 +57,20 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1().FlowSchemas().List(context.TODO(), options) + return client.FlowcontrolV1alpha1().FlowSchemas().List(context.TODO(), options) }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1().FlowSchemas().Watch(context.TODO(), options) + return client.FlowcontrolV1alpha1().FlowSchemas().Watch(context.TODO(), options) }, }, - &flowcontrolv1.FlowSchema{}, + &flowcontrolv1alpha1.FlowSchema{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *flowSchemaInformer) defaultInformer(client kubernetes.Interface, resync } func (f *flowSchemaInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.FlowSchema{}, f.defaultInformer) + return f.factory.InformerFor(&flowcontrolv1alpha1.FlowSchema{}, f.defaultInformer) } -func (f *flowSchemaInformer) Lister() v1.FlowSchemaLister { - return v1.NewFlowSchemaLister(f.Informer().GetIndexer()) +func (f *flowSchemaInformer) Lister() v1alpha1.FlowSchemaLister { + return v1alpha1.NewFlowSchemaLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go similarity index 99% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go index 3de934900fe..7097c0058bf 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1/interface.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/interface.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" diff --git a/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go similarity index 76% rename from vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go index 7092c257259..b81f5c9c36b 100644 --- a/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/informers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -16,19 +16,19 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. 
-package v1 +package v1alpha1 import ( "context" time "time" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" internalinterfaces "k8s.io/client-go/informers/internalinterfaces" kubernetes "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/listers/flowcontrol/v1" + v1alpha1 "k8s.io/client-go/listers/flowcontrol/v1alpha1" cache "k8s.io/client-go/tools/cache" ) @@ -36,7 +36,7 @@ import ( // PriorityLevelConfigurations. type PriorityLevelConfigurationInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.PriorityLevelConfigurationLister + Lister() v1alpha1.PriorityLevelConfigurationLister } type priorityLevelConfigurationInformer struct { @@ -57,20 +57,20 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1().PriorityLevelConfigurations().List(context.TODO(), options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().List(context.TODO(), options) }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(context.TODO(), options) + return client.FlowcontrolV1alpha1().PriorityLevelConfigurations().Watch(context.TODO(), options) }, }, - &flowcontrolv1.PriorityLevelConfiguration{}, + &flowcontrolv1alpha1.PriorityLevelConfiguration{}, resyncPeriod, indexers, ) @@ -81,9 +81,9 @@ func (f *priorityLevelConfigurationInformer) defaultInformer(client kubernetes.I } func (f *priorityLevelConfigurationInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&flowcontrolv1.PriorityLevelConfiguration{}, f.defaultInformer) + return f.factory.InformerFor(&flowcontrolv1alpha1.PriorityLevelConfiguration{}, f.defaultInformer) } -func (f *priorityLevelConfigurationInformer) Lister() v1.PriorityLevelConfigurationLister { - return v1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) +func (f *priorityLevelConfigurationInformer) Lister() v1alpha1.PriorityLevelConfigurationLister { + return v1alpha1.NewPriorityLevelConfigurationLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go index 680768815bd..5495239b29d 100644 --- a/vendor/k8s.io/client-go/informers/generic.go +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -45,7 +45,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" v1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ 
-259,11 +259,11 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case extensionsv1beta1.SchemeGroupVersion.WithResource("replicasets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil - // Group=flowcontrol.apiserver.k8s.io, Version=v1 - case flowcontrolv1.SchemeGroupVersion.WithResource("flowschemas"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().FlowSchemas().Informer()}, nil - case flowcontrolv1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1().PriorityLevelConfigurations().Informer()}, nil + // Group=flowcontrol.apiserver.k8s.io, Version=v1alpha1 + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("flowschemas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().FlowSchemas().Informer()}, nil + case flowcontrolv1alpha1.SchemeGroupVersion.WithResource("prioritylevelconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Flowcontrol().V1alpha1().PriorityLevelConfigurations().Informer()}, nil // Group=flowcontrol.apiserver.k8s.io, Version=v1beta1 case flowcontrolv1beta1.SchemeGroupVersion.WithResource("flowschemas"): @@ -296,10 +296,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil - case networkingv1alpha1.SchemeGroupVersion.WithResource("servicecidrs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil // Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): @@ -326,6 +326,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=policy, Version=v1beta1 case policyv1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil + case policyv1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodSecurityPolicies().Informer()}, nil // Group=rbac.authorization.k8s.io, Version=v1 case rbacv1.SchemeGroupVersion.WithResource("clusterroles"): @@ -396,8 +398,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().CSIStorageCapacities().Informer()}, nil case storagev1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil - case 
storagev1alpha1.SchemeGroupVersion.WithResource("volumeattributesclasses"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttributesClasses().Informer()}, nil // Group=storage.k8s.io, Version=v1beta1 case storagev1beta1.SchemeGroupVersion.WithResource("csidrivers"): diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go similarity index 69% rename from vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go rename to vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go index 57e6021431e..cefd0f8a1ee 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go @@ -32,58 +32,58 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// ServiceCIDRInformer provides access to a shared informer and lister for -// ServiceCIDRs. -type ServiceCIDRInformer interface { +// ClusterCIDRInformer provides access to a shared informer and lister for +// ClusterCIDRs. +type ClusterCIDRInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ServiceCIDRLister + Lister() v1alpha1.ClusterCIDRLister } -type serviceCIDRInformer struct { +type clusterCIDRInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil) +func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type. +// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1alpha1().ServiceCIDRs().List(context.TODO(), options) + return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options) + return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) }, }, - &networkingv1alpha1.ServiceCIDR{}, + &networkingv1alpha1.ClusterCIDR{}, resyncPeriod, indexers, ) } -func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer) +func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) } -func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister { - return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer()) +func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { + return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) } diff --git a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go index ae9883b55fe..07e7d208ca2 100644 --- a/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -24,10 +24,10 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // ClusterCIDRs returns a ClusterCIDRInformer. + ClusterCIDRs() ClusterCIDRInformer // IPAddresses returns a IPAddressInformer. IPAddresses() IPAddressInformer - // ServiceCIDRs returns a ServiceCIDRInformer. - ServiceCIDRs() ServiceCIDRInformer } type version struct { @@ -41,12 +41,12 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// ClusterCIDRs returns a ClusterCIDRInformer. +func (v *version) ClusterCIDRs() ClusterCIDRInformer { + return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // IPAddresses returns a IPAddressInformer. 
func (v *version) IPAddresses() IPAddressInformer { return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } - -// ServiceCIDRs returns a ServiceCIDRInformer. -func (v *version) ServiceCIDRs() ServiceCIDRInformer { - return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go index 055c8adc55d..a6c1825d278 100644 --- a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. PodDisruptionBudgets() PodDisruptionBudgetInformer + // PodSecurityPolicies returns a PodSecurityPolicyInformer. + PodSecurityPolicies() PodSecurityPolicyInformer } type version struct { @@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } + +// PodSecurityPolicies returns a PodSecurityPolicyInformer. +func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { + return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000000..b87d23434ef --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/podsecuritypolicy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + policyv1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/policy/v1beta1" + cache "k8s.io/client-go/tools/cache" +) + +// PodSecurityPolicyInformer provides access to a shared informer and lister for +// PodSecurityPolicies. +type PodSecurityPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodSecurityPolicyLister +} + +type podSecurityPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodSecurityPolicies().Watch(context.TODO(), options) + }, + }, + &policyv1beta1.PodSecurityPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&policyv1beta1.PodSecurityPolicy{}, f.defaultInformer) +} + +func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { + return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go index bda3b1add97..033d3b10aa1 100644 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -28,8 +28,6 @@ type Interface interface { CSIStorageCapacities() CSIStorageCapacityInformer // VolumeAttachments returns a VolumeAttachmentInformer. VolumeAttachments() VolumeAttachmentInformer - // VolumeAttributesClasses returns a VolumeAttributesClassInformer. - VolumeAttributesClasses() VolumeAttributesClassInformer } type version struct { @@ -52,8 +50,3 @@ func (v *version) CSIStorageCapacities() CSIStorageCapacityInformer { func (v *version) VolumeAttachments() VolumeAttachmentInformer { return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } - -// VolumeAttributesClasses returns a VolumeAttributesClassInformer. -func (v *version) VolumeAttributesClasses() VolumeAttributesClassInformer { - return &volumeAttributesClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go deleted file mode 100644 index 5e62e2f4230..00000000000 --- a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - storagev1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// VolumeAttributesClassInformer provides access to a shared informer and lister for -// VolumeAttributesClasses. -type VolumeAttributesClassInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.VolumeAttributesClassLister -} - -type volumeAttributesClassInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredVolumeAttributesClassInformer constructs a new informer for VolumeAttributesClass type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1alpha1().VolumeAttributesClasses().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.StorageV1alpha1().VolumeAttributesClasses().Watch(context.TODO(), options) - }, - }, - &storagev1alpha1.VolumeAttributesClass{}, - resyncPeriod, - indexers, - ) -} - -func (f *volumeAttributesClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredVolumeAttributesClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *volumeAttributesClassInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&storagev1alpha1.VolumeAttributesClass{}, f.defaultInformer) -} - -func (f *volumeAttributesClassInformer) Lister() v1alpha1.VolumeAttributesClassLister { - return v1alpha1.NewVolumeAttributesClassLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index a0095d086fd..6345f2fb626 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -52,7 +52,7 @@ import ( eventsv1 "k8s.io/client-go/kubernetes/typed/events/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - flowcontrolv1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3" @@ -109,7 +109,7 @@ type Interface interface { EventsV1() eventsv1.EventsV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface + FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface FlowcontrolV1beta3() flowcontrolv1beta3.FlowcontrolV1beta3Interface @@ -165,7 +165,7 @@ type Clientset struct { eventsV1 *eventsv1.EventsV1Client eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - flowcontrolV1 *flowcontrolv1.FlowcontrolV1Client + flowcontrolV1alpha1 *flowcontrolv1alpha1.FlowcontrolV1alpha1Client flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client flowcontrolV1beta3 *flowcontrolv1beta3.FlowcontrolV1beta3Client @@ -334,9 +334,9 @@ func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Inter return c.extensionsV1beta1 } -// FlowcontrolV1 retrieves the FlowcontrolV1Client -func (c *Clientset) 
FlowcontrolV1() flowcontrolv1.FlowcontrolV1Interface { - return c.flowcontrolV1 +// FlowcontrolV1alpha1 retrieves the FlowcontrolV1alpha1Client +func (c *Clientset) FlowcontrolV1alpha1() flowcontrolv1alpha1.FlowcontrolV1alpha1Interface { + return c.flowcontrolV1alpha1 } // FlowcontrolV1beta1 retrieves the FlowcontrolV1beta1Client @@ -604,7 +604,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.flowcontrolV1, err = flowcontrolv1.NewForConfigAndClient(&configShallowCopy, httpClient) + cs.flowcontrolV1alpha1, err = flowcontrolv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -742,7 +742,7 @@ func New(c rest.Interface) *Clientset { cs.eventsV1 = eventsv1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.flowcontrolV1 = flowcontrolv1.New(c) + cs.flowcontrolV1alpha1 = flowcontrolv1alpha1.New(c) cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.flowcontrolV1beta3 = flowcontrolv1beta3.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index f44055fbfc7..64d3ce2a7b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -48,7 +48,7 @@ import ( eventsv1 "k8s.io/api/events/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - flowcontrolv1 "k8s.io/api/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3" @@ -110,7 +110,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ eventsv1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, - flowcontrolv1.AddToScheme, + flowcontrolv1alpha1.AddToScheme, flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, flowcontrolv1beta3.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go index 3af5d054f10..df51baa4d4c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/doc.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/doc.go @@ -17,4 +17,4 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. // This package has the automatically generated typed clients. -package v1 +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go similarity index 64% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go index 3d7d93ef147..c6f2d940560 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowcontrol_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowcontrol_client.go @@ -16,39 +16,39 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. 
-package v1 +package v1alpha1 import ( "net/http" - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type FlowcontrolV1Interface interface { +type FlowcontrolV1alpha1Interface interface { RESTClient() rest.Interface FlowSchemasGetter PriorityLevelConfigurationsGetter } -// FlowcontrolV1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. -type FlowcontrolV1Client struct { +// FlowcontrolV1alpha1Client is used to interact with features provided by the flowcontrol.apiserver.k8s.io group. +type FlowcontrolV1alpha1Client struct { restClient rest.Interface } -func (c *FlowcontrolV1Client) FlowSchemas() FlowSchemaInterface { +func (c *FlowcontrolV1alpha1Client) FlowSchemas() FlowSchemaInterface { return newFlowSchemas(c) } -func (c *FlowcontrolV1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { +func (c *FlowcontrolV1alpha1Client) PriorityLevelConfigurations() PriorityLevelConfigurationInterface { return newPriorityLevelConfigurations(c) } -// NewForConfig creates a new FlowcontrolV1Client for the given config. +// NewForConfig creates a new FlowcontrolV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { +func NewForConfig(c *rest.Config) (*FlowcontrolV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -60,9 +60,9 @@ func NewForConfig(c *rest.Config) (*FlowcontrolV1Client, error) { return NewForConfigAndClient(&config, httpClient) } -// NewForConfigAndClient creates a new FlowcontrolV1Client for the given config and http client. +// NewForConfigAndClient creates a new FlowcontrolV1alpha1Client for the given config and http client. // Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1Client, error) { +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -71,12 +71,12 @@ func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowcontrolV1Client if err != nil { return nil, err } - return &FlowcontrolV1Client{client}, nil + return &FlowcontrolV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new FlowcontrolV1Client for the given config and +// NewForConfigOrDie creates a new FlowcontrolV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1Client { +func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -84,13 +84,13 @@ func NewForConfigOrDie(c *rest.Config) *FlowcontrolV1Client { return client } -// New creates a new FlowcontrolV1Client for the given RESTClient. -func New(c rest.Interface) *FlowcontrolV1Client { - return &FlowcontrolV1Client{c} +// New creates a new FlowcontrolV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *FlowcontrolV1alpha1Client { + return &FlowcontrolV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion + gv := v1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() @@ -104,7 +104,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (c *FlowcontrolV1Client) RESTClient() rest.Interface { +func (c *FlowcontrolV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go index bd36c5e6a4e..95baf825191 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type FlowSchemasGetter interface { // FlowSchemaInterface has methods to work with FlowSchema resources. 
type FlowSchemaInterface interface { - Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (*v1.FlowSchema, error) - Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) - UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (*v1.FlowSchema, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.FlowSchema, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.FlowSchemaList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) - Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) - ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) + Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error) + Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) + Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) + ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) FlowSchemaExpansion } @@ -61,15 +61,15 @@ type flowSchemas struct { } // newFlowSchemas returns a FlowSchemas -func newFlowSchemas(c *FlowcontrolV1Client) *flowSchemas { +func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas { return &flowSchemas{ client: c.RESTClient(), } } // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any. -func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} +func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} err = c.client.Get(). Resource("flowschemas"). Name(name). @@ -80,12 +80,12 @@ func (c *flowSchemas) Get(ctx context.Context, name string, options metav1.GetOp } // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors. 
-func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.FlowSchemaList, err error) { +func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1.FlowSchemaList{} + result = &v1alpha1.FlowSchemaList{} err = c.client.Get(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *flowSchemas) List(ctx context.Context, opts metav1.ListOptions) (result } // Watch returns a watch.Interface that watches the requested flowSchemas. -func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { +func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *flowSchemas) Watch(ctx context.Context, opts metav1.ListOptions) (watch } // Create takes the representation of a flowSchema and creates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.CreateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} +func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} err = c.client.Post(). Resource("flowschemas"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1.FlowSchema, opt } // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any. -func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} +func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -136,8 +136,8 @@ func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1.FlowSchema, opt // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchema, opts metav1.UpdateOptions) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} +func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} err = c.client.Put(). Resource("flowschemas"). Name(flowSchema.Name). @@ -150,7 +150,7 @@ func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1.FlowSchem } // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs. -func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { +func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). Resource("flowschemas"). Name(name). 
@@ -160,7 +160,7 @@ func (c *flowSchemas) Delete(ctx context.Context, name string, opts metav1.Delet } // DeleteCollection deletes a collection of objects. -func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { +func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *flowSchemas) DeleteCollection(ctx context.Context, opts metav1.DeleteOp } // Patch applies the patch and returns the patched flowSchema. -func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.FlowSchema, err error) { - result = &v1.FlowSchema{} +func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) { + result = &v1alpha1.FlowSchema{} err = c.client.Patch(pt). Resource("flowschemas"). Name(name). @@ -189,7 +189,7 @@ func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType } // Apply takes the given apply declarative configuration, applies it and returns the applied flowSchema. -func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { +func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowS if name == nil { return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1.FlowSchema{} + result = &v1alpha1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). @@ -215,7 +215,7 @@ func (c *flowSchemas) Apply(ctx context.Context, flowSchema *flowcontrolv1.FlowS // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1.FlowSchemaApplyConfiguration, opts metav1.ApplyOptions) (result *v1.FlowSchema, err error) { +func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1alpha1.FlowSchemaApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.FlowSchema, err error) { if flowSchema == nil { return nil, fmt.Errorf("flowSchema provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *flowSchemas) ApplyStatus(ctx context.Context, flowSchema *flowcontrolv1 return nil, fmt.Errorf("flowSchema.Name must be provided to Apply") } - result = &v1.FlowSchema{} + result = &v1alpha1.FlowSchema{} err = c.client.Patch(types.ApplyPatchType). Resource("flowschemas"). Name(*name). 
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go similarity index 97% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go index 99067738873..065b5e6b42d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/generated_expansion.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1 +package v1alpha1 type FlowSchemaExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go similarity index 69% rename from vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go index 797fe94035e..327b727c182 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( "context" @@ -24,11 +24,11 @@ import ( "fmt" "time" - v1 "k8s.io/api/flowcontrol/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - flowcontrolv1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1" + flowcontrolv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1" scheme "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) @@ -41,17 +41,17 @@ type PriorityLevelConfigurationsGetter interface { // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources. 
type PriorityLevelConfigurationInterface interface { - Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (*v1.PriorityLevelConfiguration, error) - Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) - UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (*v1.PriorityLevelConfiguration, error) - Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityLevelConfiguration, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityLevelConfigurationList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) - Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) - ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) + Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) + Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) + ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) PriorityLevelConfigurationExpansion } @@ -61,15 +61,15 @@ type priorityLevelConfigurations struct { } // newPriorityLevelConfigurations returns a PriorityLevelConfigurations -func newPriorityLevelConfigurations(c *FlowcontrolV1Client) *priorityLevelConfigurations { +func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevelConfigurations { return &priorityLevelConfigurations{ client: c.RESTClient(), } } // Get takes name of the 
priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any. -func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Get(). Resource("prioritylevelconfigurations"). Name(name). @@ -80,12 +80,12 @@ func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, opti } // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors. -func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityLevelConfigurationList, err error) { +func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1.PriorityLevelConfigurationList{} + result = &v1alpha1.PriorityLevelConfigurationList{} err = c.client.Get(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -96,7 +96,7 @@ func (c *priorityLevelConfigurations) List(ctx context.Context, opts metav1.List } // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations. -func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { +func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -110,8 +110,8 @@ func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts metav1.Lis } // Create takes the representation of a priorityLevelConfiguration and creates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. -func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.CreateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Post(). Resource("prioritylevelconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). @@ -122,8 +122,8 @@ func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelC } // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any. 
-func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -136,8 +136,8 @@ func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelC // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1.PriorityLevelConfiguration, opts metav1.UpdateOptions) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Put(). Resource("prioritylevelconfigurations"). Name(priorityLevelConfiguration.Name). @@ -150,7 +150,7 @@ func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priority } // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs. -func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { +func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return c.client.Delete(). Resource("prioritylevelconfigurations"). Name(name). @@ -160,7 +160,7 @@ func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, o } // DeleteCollection deletes a collection of objects. -func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { +func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -175,8 +175,8 @@ func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts } // Patch applies the patch and returns the patched priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityLevelConfiguration, err error) { - result = &v1.PriorityLevelConfiguration{} +func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) { + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Patch(pt). Resource("prioritylevelconfigurations"). Name(name). 
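// Illustrative sketch (not generated code): the server-side Apply path that the
// following hunks switch over to the v1alpha1 apply configurations. The priority
// level name and the field manager string are arbitrary placeholders, and the
// client is assumed to be built as in the earlier sketch.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolapplyv1alpha1 "k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1"
	flowcontrolclient "k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	fc, err := flowcontrolclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The generated constructor sets only name, kind and apiVersion; everything
	// else stays unset, which keeps the server-side apply patch sparse.
	plc := flowcontrolapplyv1alpha1.PriorityLevelConfiguration("example-priority-level")
	applied, err := fc.PriorityLevelConfigurations().Apply(context.TODO(), plc, metav1.ApplyOptions{
		FieldManager: "keda-docs-example", // placeholder manager name
		Force:        true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied", applied.Name)
}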
@@ -189,7 +189,7 @@ func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt } // Apply takes the given apply declarative configuration, applies it and returns the applied priorityLevelConfiguration. -func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -202,7 +202,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo if name == nil { return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1.PriorityLevelConfiguration{} + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). @@ -215,7 +215,7 @@ func (c *priorityLevelConfigurations) Apply(ctx context.Context, priorityLevelCo // ApplyStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfigurationApplyConfiguration, opts metav1.ApplyOptions) (result *v1.PriorityLevelConfiguration, err error) { +func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfigurationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) { if priorityLevelConfiguration == nil { return nil, fmt.Errorf("priorityLevelConfiguration provided to Apply must not be nil") } @@ -230,7 +230,7 @@ func (c *priorityLevelConfigurations) ApplyStatus(ctx context.Context, priorityL return nil, fmt.Errorf("priorityLevelConfiguration.Name must be provided to Apply") } - result = &v1.PriorityLevelConfiguration{} + result = &v1alpha1.PriorityLevelConfiguration{} err = c.client.Patch(types.ApplyPatchType). Resource("prioritylevelconfigurations"). Name(*name). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go new file mode 100644 index 00000000000..9df76351db8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. +// A group's client should implement this interface. +type ClusterCIDRsGetter interface { + ClusterCIDRs() ClusterCIDRInterface +} + +// ClusterCIDRInterface has methods to work with ClusterCIDR resources. +type ClusterCIDRInterface interface { + Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) + Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) + Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) + ClusterCIDRExpansion +} + +// clusterCIDRs implements ClusterCIDRInterface +type clusterCIDRs struct { + client rest.Interface +} + +// newClusterCIDRs returns a ClusterCIDRs +func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { + return &clusterCIDRs{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. +func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Get(). + Resource("clustercidrs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. +func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterCIDRList{} + err = c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterCIDRs. +func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterCIDR and creates it. 
Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Post(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Put(). + Resource("clustercidrs"). + Name(clusterCIDR.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. +func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustercidrs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustercidrs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterCIDR. +func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(pt). + Resource("clustercidrs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. +func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { + if clusterCIDR == nil { + return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterCIDR) + if err != nil { + return nil, err + } + name := clusterCIDR.Name + if name == nil { + return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") + } + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustercidrs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go index df12a463da8..9c2979d6c44 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -18,6 +18,6 @@ limitations under the License. 
package v1alpha1 -type IPAddressExpansion interface{} +type ClusterCIDRExpansion interface{} -type ServiceCIDRExpansion interface{} +type IPAddressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go index c730e624685..884c846f598 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -28,8 +28,8 @@ import ( type NetworkingV1alpha1Interface interface { RESTClient() rest.Interface + ClusterCIDRsGetter IPAddressesGetter - ServiceCIDRsGetter } // NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. @@ -37,12 +37,12 @@ type NetworkingV1alpha1Client struct { restClient rest.Interface } -func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { - return newIPAddresses(c) +func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { + return newClusterCIDRs(c) } -func (c *NetworkingV1alpha1Client) ServiceCIDRs() ServiceCIDRInterface { - return newServiceCIDRs(c) +func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface { + return newIPAddresses(c) } // NewForConfig creates a new NetworkingV1alpha1Client for the given config. diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go deleted file mode 100644 index 100f290a19f..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/servicecidr.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface. -// A group's client should implement this interface. -type ServiceCIDRsGetter interface { - ServiceCIDRs() ServiceCIDRInterface -} - -// ServiceCIDRInterface has methods to work with ServiceCIDR resources. 
-type ServiceCIDRInterface interface { - Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error) - Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) - UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) - Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) - ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) - ServiceCIDRExpansion -} - -// serviceCIDRs implements ServiceCIDRInterface -type serviceCIDRs struct { - client rest.Interface -} - -// newServiceCIDRs returns a ServiceCIDRs -func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs { - return &serviceCIDRs{ - client: c.RESTClient(), - } -} - -// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any. -func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Get(). - Resource("servicecidrs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors. -func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ServiceCIDRList{} - err = c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceCIDRs. -func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Post(). - Resource("servicecidrs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any. -func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Put(). - Resource("servicecidrs"). - Name(serviceCIDR.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(serviceCIDR). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs. -func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("servicecidrs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("servicecidrs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched serviceCIDR. -func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) { - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(pt). - Resource("servicecidrs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR. -func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
-func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) { - if serviceCIDR == nil { - return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(serviceCIDR) - if err != nil { - return nil, err - } - - name := serviceCIDR.Name - if name == nil { - return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply") - } - - result = &v1alpha1.ServiceCIDR{} - err = c.client.Patch(types.ApplyPatchType). - Resource("servicecidrs"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go index 6fce70c4eb8..078c16d5cb1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. package v1beta1 type PodDisruptionBudgetExpansion interface{} + +type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000000..944b61de47f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + policyv1beta1 "k8s.io/client-go/applyconfigurations/policy/v1beta1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
+type PodSecurityPolicyInterface interface { + Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error) + Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSecurityPolicy). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(podSecurityPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied podSecurityPolicy. +func (c *podSecurityPolicies) Apply(ctx context.Context, podSecurityPolicy *policyv1beta1.PodSecurityPolicyApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.PodSecurityPolicy, err error) { + if podSecurityPolicy == nil { + return nil, fmt.Errorf("podSecurityPolicy provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(podSecurityPolicy) + if err != nil { + return nil, err + } + name := podSecurityPolicy.Name + if name == nil { + return nil, fmt.Errorf("podSecurityPolicy.Name must be provided to Apply") + } + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(types.ApplyPatchType). + Resource("podsecuritypolicies"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go index fdb50932167..5b65c9c0aa1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -30,6 +30,7 @@ type PolicyV1beta1Interface interface { RESTClient() rest.Interface EvictionsGetter PodDisruptionBudgetsGetter + PodSecurityPoliciesGetter } // PolicyV1beta1Client is used to interact with features provided by the policy group. 
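// Illustrative sketch (not generated code): using the PodSecurityPolicy client that
// these policy/v1beta1 hunks re-add. PodSecurityPolicy was removed from the
// Kubernetes API in v1.25, so this only succeeds against older API servers; the
// kubeconfig path is a placeholder.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policyv1beta1client "k8s.io/client-go/kubernetes/typed/policy/v1beta1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	pc, err := policyv1beta1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// PodSecurityPolicies() is the accessor restored on PolicyV1beta1Client by the
	// next hunk in this file.
	psps, err := pc.PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, psp := range psps.Items {
		fmt.Println(psp.Name)
	}
}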
@@ -45,6 +46,10 @@ func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisrupti return newPodDisruptionBudgets(c, namespace) } +func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + // NewForConfig creates a new PolicyV1beta1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go index 436e910f244..0f51c85f9e7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -21,5 +21,3 @@ package v1alpha1 type CSIStorageCapacityExpansion interface{} type VolumeAttachmentExpansion interface{} - -type VolumeAttributesClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go index 63e3fc243f9..c9bf11d766c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -30,7 +30,6 @@ type StorageV1alpha1Interface interface { RESTClient() rest.Interface CSIStorageCapacitiesGetter VolumeAttachmentsGetter - VolumeAttributesClassesGetter } // StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. @@ -46,10 +45,6 @@ func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { return newVolumeAttachments(c) } -func (c *StorageV1alpha1Client) VolumeAttributesClasses() VolumeAttributesClassInterface { - return newVolumeAttributesClasses(c) -} - // NewForConfig creates a new StorageV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go deleted file mode 100644 index 6633a4dc150..00000000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattributesclass.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - storagev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// VolumeAttributesClassesGetter has a method to return a VolumeAttributesClassInterface. -// A group's client should implement this interface. -type VolumeAttributesClassesGetter interface { - VolumeAttributesClasses() VolumeAttributesClassInterface -} - -// VolumeAttributesClassInterface has methods to work with VolumeAttributesClass resources. -type VolumeAttributesClassInterface interface { - Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (*v1alpha1.VolumeAttributesClass, error) - Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (*v1alpha1.VolumeAttributesClass, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttributesClass, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttributesClassList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) - Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) - VolumeAttributesClassExpansion -} - -// volumeAttributesClasses implements VolumeAttributesClassInterface -type volumeAttributesClasses struct { - client rest.Interface -} - -// newVolumeAttributesClasses returns a VolumeAttributesClasses -func newVolumeAttributesClasses(c *StorageV1alpha1Client) *volumeAttributesClasses { - return &volumeAttributesClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeAttributesClass, and returns the corresponding volumeAttributesClass object, and an error if there is any. -func (c *volumeAttributesClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttributesClasses that match those selectors. -func (c *volumeAttributesClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttributesClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.VolumeAttributesClassList{} - err = c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttributesClasses. 
-func (c *volumeAttributesClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a volumeAttributesClass and creates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Create(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.CreateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Post(). - Resource("volumeattributesclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a volumeAttributesClass and updates it. Returns the server's representation of the volumeAttributesClass, and an error, if there is any. -func (c *volumeAttributesClasses) Update(ctx context.Context, volumeAttributesClass *v1alpha1.VolumeAttributesClass, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Put(). - Resource("volumeattributesclasses"). - Name(volumeAttributesClass.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(volumeAttributesClass). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the volumeAttributesClass and deletes it. Returns an error if one occurs. -func (c *volumeAttributesClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattributesclasses"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttributesClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattributesclasses"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched volumeAttributesClass. -func (c *volumeAttributesClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttributesClass, err error) { - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(pt). - Resource("volumeattributesclasses"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied volumeAttributesClass. 
-func (c *volumeAttributesClasses) Apply(ctx context.Context, volumeAttributesClass *storagev1alpha1.VolumeAttributesClassApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.VolumeAttributesClass, err error) { - if volumeAttributesClass == nil { - return nil, fmt.Errorf("volumeAttributesClass provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(volumeAttributesClass) - if err != nil { - return nil, err - } - name := volumeAttributesClass.Name - if name == nil { - return nil, fmt.Errorf("volumeAttributesClass.Name must be provided to Apply") - } - result = &v1alpha1.VolumeAttributesClass{} - err = c.client.Patch(types.ApplyPatchType). - Resource("volumeattributesclasses"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go similarity index 98% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go index 70b5eb5b171..3e74051681c 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/expansion_generated.go @@ -16,7 +16,7 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1 +package v1alpha1 // FlowSchemaListerExpansion allows custom methods to be added to // FlowSchemaLister. diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go similarity index 79% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go index 43ccd4e5ff9..c8a595cd29c 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/flowschema.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/flowschema.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,10 +30,10 @@ import ( type FlowSchemaLister interface { // List lists all FlowSchemas in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.FlowSchema, err error) + List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) // Get retrieves the FlowSchema from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.FlowSchema, error) + Get(name string) (*v1alpha1.FlowSchema, error) FlowSchemaListerExpansion } @@ -48,21 +48,21 @@ func NewFlowSchemaLister(indexer cache.Indexer) FlowSchemaLister { } // List lists all FlowSchemas in the indexer. -func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1.FlowSchema, err error) { +func (s *flowSchemaLister) List(selector labels.Selector) (ret []*v1alpha1.FlowSchema, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.FlowSchema)) + ret = append(ret, m.(*v1alpha1.FlowSchema)) }) return ret, err } // Get retrieves the FlowSchema from the index for a given name. 
-func (s *flowSchemaLister) Get(name string) (*v1.FlowSchema, error) { +func (s *flowSchemaLister) Get(name string) (*v1alpha1.FlowSchema, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1.Resource("flowschema"), name) + return nil, errors.NewNotFound(v1alpha1.Resource("flowschema"), name) } - return obj.(*v1.FlowSchema), nil + return obj.(*v1alpha1.FlowSchema), nil } diff --git a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go similarity index 79% rename from vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go rename to vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go index 61189b9cf98..daa4ff31d93 100644 --- a/vendor/k8s.io/client-go/listers/flowcontrol/v1/prioritylevelconfiguration.go +++ b/vendor/k8s.io/client-go/listers/flowcontrol/v1alpha1/prioritylevelconfiguration.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1 +package v1alpha1 import ( - v1 "k8s.io/api/flowcontrol/v1" + v1alpha1 "k8s.io/api/flowcontrol/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,10 +30,10 @@ import ( type PriorityLevelConfigurationLister interface { // List lists all PriorityLevelConfigurations in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) + List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) // Get retrieves the PriorityLevelConfiguration from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1.PriorityLevelConfiguration, error) + Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) PriorityLevelConfigurationListerExpansion } @@ -48,21 +48,21 @@ func NewPriorityLevelConfigurationLister(indexer cache.Indexer) PriorityLevelCon } // List lists all PriorityLevelConfigurations in the indexer. -func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1.PriorityLevelConfiguration, err error) { +func (s *priorityLevelConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityLevelConfiguration, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.PriorityLevelConfiguration)) + ret = append(ret, m.(*v1alpha1.PriorityLevelConfiguration)) }) return ret, err } // Get retrieves the PriorityLevelConfiguration from the index for a given name. 
-func (s *priorityLevelConfigurationLister) Get(name string) (*v1.PriorityLevelConfiguration, error) { +func (s *priorityLevelConfigurationLister) Get(name string) (*v1alpha1.PriorityLevelConfiguration, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1.Resource("prioritylevelconfiguration"), name) + return nil, errors.NewNotFound(v1alpha1.Resource("prioritylevelconfiguration"), name) } - return obj.(*v1.PriorityLevelConfiguration), nil + return obj.(*v1alpha1.PriorityLevelConfiguration), nil } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go similarity index 54% rename from vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go rename to vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go index 8bc2b10e681..dca9d7bf0cb 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/servicecidr.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go @@ -25,44 +25,44 @@ import ( "k8s.io/client-go/tools/cache" ) -// ServiceCIDRLister helps list ServiceCIDRs. +// ClusterCIDRLister helps list ClusterCIDRs. // All objects returned here must be treated as read-only. -type ServiceCIDRLister interface { - // List lists all ServiceCIDRs in the indexer. +type ClusterCIDRLister interface { + // List lists all ClusterCIDRs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) - // Get retrieves the ServiceCIDR from the index for a given name. + List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) + // Get retrieves the ClusterCIDR from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ServiceCIDR, error) - ServiceCIDRListerExpansion + Get(name string) (*v1alpha1.ClusterCIDR, error) + ClusterCIDRListerExpansion } -// serviceCIDRLister implements the ServiceCIDRLister interface. -type serviceCIDRLister struct { +// clusterCIDRLister implements the ClusterCIDRLister interface. +type clusterCIDRLister struct { indexer cache.Indexer } -// NewServiceCIDRLister returns a new ServiceCIDRLister. -func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister { - return &serviceCIDRLister{indexer: indexer} +// NewClusterCIDRLister returns a new ClusterCIDRLister. +func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister { + return &clusterCIDRLister{indexer: indexer} } -// List lists all ServiceCIDRs in the indexer. -func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) { +// List lists all ClusterCIDRs in the indexer. +func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ServiceCIDR)) + ret = append(ret, m.(*v1alpha1.ClusterCIDR)) }) return ret, err } -// Get retrieves the ServiceCIDR from the index for a given name. -func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) { +// Get retrieves the ClusterCIDR from the index for a given name. 
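
Editorial aside, not part of the patch: the generated listers touched above (FlowSchema, PriorityLevelConfiguration, ClusterCIDR) all follow the same shape, a thin typed wrapper over a cache.Indexer. A minimal sketch of consuming one of them against the post-patch v1alpha1 types; the indexer would normally come from a shared informer, here it is built by hand just to show List/Get.

package main

import (
	"fmt"

	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	flowcontrollisters "k8s.io/client-go/listers/flowcontrol/v1alpha1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Name-keyed indexer, as shared informers use for cluster-scoped objects.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	_ = indexer.Add(&flowcontrolv1alpha1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{Name: "exempt"}, // sample object for illustration
	})

	lister := flowcontrollisters.NewFlowSchemaLister(indexer)

	// Objects returned by the lister are read-only views of the cache.
	all, _ := lister.List(labels.Everything())
	fmt.Println("flow schemas in cache:", len(all))

	if fs, err := lister.Get("exempt"); err == nil {
		fmt.Println("got:", fs.Name)
	}
}
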
+func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name) + return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name) } - return obj.(*v1alpha1.ServiceCIDR), nil + return obj.(*v1alpha1.ClusterCIDR), nil } diff --git a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go index fc7316521bf..d57b71b0059 100644 --- a/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go @@ -18,10 +18,10 @@ limitations under the License. package v1alpha1 +// ClusterCIDRListerExpansion allows custom methods to be added to +// ClusterCIDRLister. +type ClusterCIDRListerExpansion interface{} + // IPAddressListerExpansion allows custom methods to be added to // IPAddressLister. type IPAddressListerExpansion interface{} - -// ServiceCIDRListerExpansion allows custom methods to be added to -// ServiceCIDRLister. -type ServiceCIDRListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go index eba5e2f72d6..9a005f20bb8 100644 --- a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go @@ -25,3 +25,7 @@ type EvictionListerExpansion interface{} // EvictionNamespaceListerExpansion allows custom methods to be added to // EvictionNamespaceLister. type EvictionNamespaceListerExpansion interface{} + +// PodSecurityPolicyListerExpansion allows custom methods to be added to +// PodSecurityPolicyLister. +type PodSecurityPolicyListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000000..7e73161b25a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/podsecuritypolicy.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodSecurityPolicyLister helps list PodSecurityPolicies. +// All objects returned here must be treated as read-only. +type PodSecurityPolicyLister interface { + // List lists all PodSecurityPolicies in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) + // Get retrieves the PodSecurityPolicy from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1beta1.PodSecurityPolicy, error) + PodSecurityPolicyListerExpansion +} + +// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. +type podSecurityPolicyLister struct { + indexer cache.Indexer +} + +// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. +func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { + return &podSecurityPolicyLister{indexer: indexer} +} + +// List lists all PodSecurityPolicies in the indexer. +func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) + }) + return ret, err +} + +// Get retrieves the PodSecurityPolicy from the index for a given name. +func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) + } + return obj.(*v1beta1.PodSecurityPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go index 327fb6e31cd..edefe6d05ed 100644 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -29,7 +29,3 @@ type CSIStorageCapacityNamespaceListerExpansion interface{} // VolumeAttachmentListerExpansion allows custom methods to be added to // VolumeAttachmentLister. type VolumeAttachmentListerExpansion interface{} - -// VolumeAttributesClassListerExpansion allows custom methods to be added to -// VolumeAttributesClassLister. -type VolumeAttributesClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go deleted file mode 100644 index f30b4a89ba1..00000000000 --- a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattributesclass.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// VolumeAttributesClassLister helps list VolumeAttributesClasses. -// All objects returned here must be treated as read-only. -type VolumeAttributesClassLister interface { - // List lists all VolumeAttributesClasses in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) - // Get retrieves the VolumeAttributesClass from the index for a given name. 
- // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.VolumeAttributesClass, error) - VolumeAttributesClassListerExpansion -} - -// volumeAttributesClassLister implements the VolumeAttributesClassLister interface. -type volumeAttributesClassLister struct { - indexer cache.Indexer -} - -// NewVolumeAttributesClassLister returns a new VolumeAttributesClassLister. -func NewVolumeAttributesClassLister(indexer cache.Indexer) VolumeAttributesClassLister { - return &volumeAttributesClassLister{indexer: indexer} -} - -// List lists all VolumeAttributesClasses in the indexer. -func (s *volumeAttributesClassLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttributesClass, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.VolumeAttributesClass)) - }) - return ret, err -} - -// Get retrieves the VolumeAttributesClass from the index for a given name. -func (s *volumeAttributesClassLister) Get(name string) (*v1alpha1.VolumeAttributesClass, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("volumeattributesclass"), name) - } - return obj.(*v1alpha1.VolumeAttributesClass), nil -} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go index ca517a01d4d..7ab3cd46fe3 100644 --- a/vendor/k8s.io/client-go/restmapper/shortcut.go +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -17,7 +17,6 @@ limitations under the License. package restmapper import ( - "fmt" "strings" "k8s.io/klog/v2" @@ -33,15 +32,13 @@ type shortcutExpander struct { RESTMapper meta.RESTMapper discoveryClient discovery.DiscoveryInterface - - warningHandler func(string) } var _ meta.ResettableRESTMapper = shortcutExpander{} // NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery -func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, warningHandler func(string)) meta.RESTMapper { - return shortcutExpander{RESTMapper: delegate, discoveryClient: client, warningHandler: warningHandler} +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} } // KindFor fulfills meta.RESTMapper @@ -148,37 +145,16 @@ func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionRes } } - found := false - var rsc schema.GroupVersionResource - warnedAmbiguousShortcut := make(map[schema.GroupResource]bool) for _, item := range shortcutResources { if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { continue } if resource.Resource == item.ShortForm.Resource { - if found { - if item.LongForm.Group == rsc.Group && item.LongForm.Resource == rsc.Resource { - // It is common and acceptable that group/resource has multiple - // versions registered in cluster. This does not introduce ambiguity - // in terms of shortname usage. 
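
Editorial aside, not part of the patch: with the reverted signature above, NewShortcutExpander takes only a delegate RESTMapper and a discovery client; the warning handler for ambiguous short names is gone. A rough wiring sketch under the assumption that a kubeconfig is available at the default location (error handling trimmed).

package main

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, _ := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	dc, _ := discovery.NewDiscoveryClientForConfig(cfg)
	cached := memory.NewMemCacheClient(dc)

	// Expands short names ("deploy", "svc", ...) learned from API discovery;
	// on this client-go version there is no third warning-handler argument.
	mapper := restmapper.NewShortcutExpander(
		restmapper.NewDeferredDiscoveryRESTMapper(cached),
		cached,
	)
	_ = mapper
}
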
- continue - } - if !warnedAmbiguousShortcut[item.LongForm] { - if e.warningHandler != nil { - e.warningHandler(fmt.Sprintf("short name %q could also match lower priority resource %s", resource.Resource, item.LongForm.String())) - } - warnedAmbiguousShortcut[item.LongForm] = true - } - continue - } - rsc.Resource = item.LongForm.Resource - rsc.Group = item.LongForm.Group - found = true + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource } } - if found { - return rsc - } // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling if len(resource.Group) == 0 { diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index c1ea13de574..45eaff52853 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -334,9 +334,12 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { return nil } if err != nil { - klog.Warningf("The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = %v", err) + if !apierrors.IsInvalid(err) { + return err + } + klog.Warning("the watch-list feature is not supported by the server, falling back to the previous LIST/WATCH semantic") fallbackToList = true - // ensure that we won't accidentally pass some garbage down the watch. + // Ensure that we won't accidentally pass some garbage down the watch. w = nil } } @@ -348,8 +351,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { } } - klog.V(2).Infof("Caches populated for %v from %s", r.typeDescription, r.name) - resyncerrc := make(chan error, 1) cancelCh := make(chan struct{}) defer close(cancelCh) @@ -394,11 +395,6 @@ func (r *Reflector) watch(w watch.Interface, stopCh <-chan struct{}, resyncerrc // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors select { case <-stopCh: - // we can only end up here when the stopCh - // was closed after a successful watchlist or list request - if w != nil { - w.Stop() - } return nil default: } @@ -674,12 +670,6 @@ func (r *Reflector) watchList(stopCh <-chan struct{}) (watch.Interface, error) { // "k8s.io/initial-events-end" bookmark. initTrace.Step("Objects streamed", trace.Field{Key: "count", Value: len(temporaryStore.List())}) r.setIsLastSyncResourceVersionUnavailable(false) - - // we utilize the temporaryStore to ensure independence from the current store implementation. - // as of today, the store is implemented as a queue and will be drained by the higher-level - // component as soon as it finishes replacing the content. 
- checkWatchListConsistencyIfRequested(stopCh, r.name, resourceVersion, r.listerWatcher, temporaryStore) - if err = r.store.Replace(temporaryStore.List(), resourceVersion); err != nil { return nil, fmt.Errorf("unable to sync watch-list result: %v", err) } @@ -772,7 +762,7 @@ loop: } case watch.Bookmark: // A `Bookmark` means watch has synced here, just update the resourceVersion - if meta.GetAnnotations()["k8s.io/initial-events-end"] == "true" { + if _, ok := meta.GetAnnotations()["k8s.io/initial-events-end"]; ok { if exitOnInitialEventsEndBookmark != nil { *exitOnInitialEventsEndBookmark = true } diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go deleted file mode 100644 index aa3027d714e..00000000000 --- a/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "context" - "os" - "sort" - "strconv" - "time" - - "github.com/google/go-cmp/cmp" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog/v2" -) - -var dataConsistencyDetectionEnabled = false - -func init() { - dataConsistencyDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR")) -} - -// checkWatchListConsistencyIfRequested performs a data consistency check only when -// the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable was set during a binary startup. -// -// The consistency check is meant to be enforced only in the CI, not in production. -// The check ensures that data retrieved by the watch-list api call -// is exactly the same as data received by the standard list api call. -// -// Note that this function will panic when data inconsistency is detected. -// This is intentional because we want to catch it in the CI. -func checkWatchListConsistencyIfRequested(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - if !dataConsistencyDetectionEnabled { - return - } - checkWatchListConsistency(stopCh, identity, lastSyncedResourceVersion, listerWatcher, store) -} - -// checkWatchListConsistency exists solely for testing purposes. -// we cannot use checkWatchListConsistencyIfRequested because -// it is guarded by an environmental variable. -// we cannot manipulate the environmental variable because -// it will affect other tests in this package. 
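
Editorial aside, not part of the patch: the deleted consistency detector above is switched on only via the KUBE_WATCHLIST_INCONSISTENCY_DETECTOR environment variable read at process start. A minimal sketch of that gating pattern; runExpensiveCheck is a hypothetical stand-in for the real list-vs-store comparison.

package main

import (
	"fmt"
	"os"
	"strconv"
)

var consistencyCheckEnabled bool

func init() {
	// Mirrors the deleted init(): unset or unparsable values leave the check off.
	consistencyCheckEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_WATCHLIST_INCONSISTENCY_DETECTOR"))
}

// runExpensiveCheck is a placeholder for the CI-only comparison of the
// watch-list result against a standard LIST at the same resource version.
func runExpensiveCheck() {
	fmt.Println("comparing watch-list result against a standard LIST")
}

func main() {
	if consistencyCheckEnabled {
		runExpensiveCheck()
	}
}
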
-func checkWatchListConsistency(stopCh <-chan struct{}, identity string, lastSyncedResourceVersion string, listerWatcher Lister, store Store) { - klog.Warningf("%s: data consistency check for the watch-list feature is enabled, this will result in an additional call to the API server.", identity) - opts := metav1.ListOptions{ - ResourceVersion: lastSyncedResourceVersion, - ResourceVersionMatch: metav1.ResourceVersionMatchExact, - } - var list runtime.Object - err := wait.PollUntilContextCancel(wait.ContextForChannel(stopCh), time.Second, true, func(_ context.Context) (done bool, err error) { - list, err = listerWatcher.List(opts) - if err != nil { - // the consistency check will only be enabled in the CI - // and LIST calls in general will be retired by the client-go library - // if we fail simply log and retry - klog.Errorf("failed to list data from the server, retrying until stopCh is closed, err: %v", err) - return false, nil - } - return true, nil - }) - if err != nil { - klog.Errorf("failed to list data from the server, the watch-list consistency check won't be performed, stopCh was closed, err: %v", err) - return - } - - rawListItems, err := meta.ExtractListWithAlloc(list) - if err != nil { - panic(err) // this should never happen - } - - listItems := toMetaObjectSliceOrDie(rawListItems) - storeItems := toMetaObjectSliceOrDie(store.List()) - - sort.Sort(byUID(listItems)) - sort.Sort(byUID(storeItems)) - - if !cmp.Equal(listItems, storeItems) { - klog.Infof("%s: data received by the new watch-list api call is different than received by the standard list api call, diff: %v", identity, cmp.Diff(listItems, storeItems)) - msg := "data inconsistency detected for the watch-list feature, panicking!" - panic(msg) - } -} - -type byUID []metav1.Object - -func (a byUID) Len() int { return len(a) } -func (a byUID) Less(i, j int) bool { return a[i].GetUID() < a[j].GetUID() } -func (a byUID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func toMetaObjectSliceOrDie[T any](s []T) []metav1.Object { - result := make([]metav1.Object, len(s)) - for i, v := range s { - m, err := meta.Accessor(v) - if err != nil { - panic(err) - } - result[i] = m - } - return result -} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go index b3f37431d5c..be8694ddb62 100644 --- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -334,9 +334,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool }, stopCh) if err != nil { + klog.V(2).Infof("stop requested") return false } + klog.V(4).Infof("caches populated") return true } diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go index 0fc2fd0a0ca..10744156b83 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -49,12 +49,12 @@ type InClusterConfig interface { Possible() bool } -// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name +// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: 
overrides}} } -// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed context name and the fallback auth reader +// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} } diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go index e0164f301ee..e3000bf6ec1 100644 --- a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go +++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go @@ -81,27 +81,27 @@ type EventSinkImpl struct { } // Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Create(event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't create an event with empty namespace") } - return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{}) + return e.Interface.Events(event.Namespace).Create(context.TODO(), event, metav1.CreateOptions{}) } // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Update(event *eventsv1.Event) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't update an event with empty namespace") } - return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{}) + return e.Interface.Events(event.Namespace).Update(context.TODO(), event, metav1.UpdateOptions{}) } // Patch applies the patch and returns the patched event, and an error, if there is any. -func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { +func (e *EventSinkImpl) Patch(event *eventsv1.Event, data []byte) (*eventsv1.Event, error) { if event.Namespace == "" { return nil, fmt.Errorf("can't patch an event with empty namespace") } - return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) + return e.Interface.Events(event.Namespace).Patch(context.TODO(), event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{}) } // NewBroadcaster Creates a new event broadcaster. 
@@ -124,13 +124,13 @@ func (e *eventBroadcasterImpl) Shutdown() { } // refreshExistingEventSeries refresh events TTL -func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) { +func (e *eventBroadcasterImpl) refreshExistingEventSeries() { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() for isomorphicKey, event := range e.eventCache { if event.Series != nil { - if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry { + if recordedEvent, retry := recordEvent(e.sink, event); !retry { if recordedEvent != nil { e.eventCache[isomorphicKey] = recordedEvent } @@ -142,7 +142,7 @@ func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) { // finishSeries checks if a series has ended and either: // - write final count to the apiserver // - delete a singleton event (i.e. series field is nil) from the cache -func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) { +func (e *eventBroadcasterImpl) finishSeries() { // TODO: Investigate whether lock contention won't be a problem e.mu.Lock() defer e.mu.Unlock() @@ -150,7 +150,7 @@ func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) { eventSerie := event.Series if eventSerie != nil { if eventSerie.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) { - if _, retry := recordEvent(ctx, e.sink, event); !retry { + if _, retry := recordEvent(e.sink, event); !retry { delete(e.eventCache, isomorphicKey) } } @@ -161,13 +161,13 @@ func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) { } // NewRecorder returns an EventRecorder that records events with the given event source. -func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger { +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder { hostname, _ := os.Hostname() reportingInstance := reportingController + "-" + hostname - return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} + return &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}} } -func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) { +func (e *eventBroadcasterImpl) recordToSink(event *eventsv1.Event, clock clock.Clock) { // Make a copy before modification, because there could be multiple listeners. eventCopy := event.DeepCopy() go func() { @@ -197,7 +197,7 @@ func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1 }() if evToRecord != nil { // TODO: Add a metric counting the number of recording attempts - e.attemptRecording(ctx, evToRecord) + e.attemptRecording(evToRecord) // We don't want the new recorded Event to be reflected in the // client's cache because server-side mutations could mess with the // aggregation mechanism used by the client. 
@@ -205,45 +205,40 @@ func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1 }() } -func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) { +func (e *eventBroadcasterImpl) attemptRecording(event *eventsv1.Event) *eventsv1.Event { tries := 0 for { - if _, retry := recordEvent(ctx, e.sink, event); !retry { - return + if recordedEvent, retry := recordEvent(e.sink, event); !retry { + return recordedEvent } tries++ if tries >= maxTriesPerEvent { - klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) - return + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + return nil } // Randomize sleep so that various clients won't all be - // synced up if the master goes down. Give up when - // the context is canceled. - select { - case <-ctx.Done(): - return - case <-time.After(wait.Jitter(e.sleepDuration, 0.25)): - } + // synced up if the master goes down. + time.Sleep(wait.Jitter(e.sleepDuration, 0.25)) } } -func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { +func recordEvent(sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) { var newEvent *eventsv1.Event var err error isEventSeries := event.Series != nil if isEventSeries { patch, patchBytesErr := createPatchBytesForSeries(event) if patchBytesErr != nil { - klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible") + klog.Errorf("Unable to calculate diff, no merge is possible: %v", patchBytesErr) return nil, false } - newEvent, err = sink.Patch(ctx, event, patch) + newEvent, err = sink.Patch(event, patch) } // Update can fail because the event may have been removed and it no longer exists. if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) { // Making sure that ResourceVersion is empty on creation event.ResourceVersion = "" - newEvent, err = sink.Create(ctx, event) + newEvent, err = sink.Create(event) } if err == nil { return newEvent, false @@ -253,7 +248,7 @@ func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*e switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return nil, false case *errors.StatusError: if errors.IsAlreadyExists(err) { @@ -265,9 +260,9 @@ func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*e if isEventSeries { return nil, true } - klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return nil, false case *errors.UnexpectedObjectError: @@ -276,7 +271,7 @@ func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*e default: // This case includes actual http transport errors. Go ahead and retry. 
} - klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)") + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) return nil, true } @@ -312,31 +307,21 @@ func getKey(event *eventsv1.Event) eventKey { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. // The return value can be ignored or used to stop recording, if desired. // TODO: this function should also return an error. -// -// Deprecated: use StartLogging instead. func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() { - logger := klog.Background().V(int(verbosity)) - stopWatcher, err := e.StartLogging(logger) - if err != nil { - logger.Error(err, "Failed to start event watcher") - return func() {} - } - return stopWatcher -} - -// StartLogging starts sending events received from this EventBroadcaster to the structured logger. -// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). -// The returned function can be ignored or used to stop recording, if desired. -func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) { - return e.StartEventWatcher( + stopWatcher, err := e.StartEventWatcher( func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - logger.Error(nil, "unexpected type, expected eventsv1.Event") + klog.Errorf("unexpected type, expected eventsv1.Event") return } - logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) + klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) }) + if err != nil { + klog.Errorf("failed to start event watcher: '%v'", err) + return func() {} + } + return stopWatcher } // StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. @@ -344,6 +329,7 @@ func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) { watcher, err := e.Watch() if err != nil { + klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) return nil, err } go func() { @@ -359,42 +345,37 @@ func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime return watcher.Stop, nil } -func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error { +func (e *eventBroadcasterImpl) startRecordingEvents(stopCh <-chan struct{}) error { eventHandler := func(obj runtime.Object) { event, ok := obj.(*eventsv1.Event) if !ok { - klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event") + klog.Errorf("unexpected type, expected eventsv1.Event") return } - e.recordToSink(ctx, event, clock.RealClock{}) + e.recordToSink(event, clock.RealClock{}) } stopWatcher, err := e.StartEventWatcher(eventHandler) if err != nil { return err } go func() { - <-ctx.Done() + <-stopCh stopWatcher() }() return nil } // StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. 
-// Deprecated: use StartRecordingToSinkWithContext instead. func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) { - err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh)) + go wait.Until(e.refreshExistingEventSeries, refreshTime, stopCh) + go wait.Until(e.finishSeries, finishTime, stopCh) + err := e.startRecordingEvents(stopCh) if err != nil { - klog.Background().Error(err, "Failed to start recording to sink") + klog.Errorf("unexpected type, expected eventsv1.Event") + return } } -// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink. -func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error { - go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime) - go wait.UntilWithContext(ctx, e.finishSeries, finishTime) - return e.startRecordingEvents(ctx) -} - type eventBroadcasterAdapterImpl struct { coreClient typedv1core.EventsGetter coreBroadcaster record.EventBroadcaster @@ -428,14 +409,14 @@ func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{ } } -func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger { +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorder { if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) } return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) } -func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger { +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorder { return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) } diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go index 654317884f2..17d05327153 100644 --- a/vendor/k8s.io/client-go/tools/events/event_recorder.go +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -40,33 +40,12 @@ type recorderImpl struct { clock clock.Clock } -var _ EventRecorder = &recorderImpl{} - func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { - recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...) -} - -type recorderImplLogger struct { - *recorderImpl - logger klog.Logger -} - -var _ EventRecorderLogger = &recorderImplLogger{} - -func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { - recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...) -} - -func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { - return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} -} - -func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { timestamp := metav1.MicroTime{Time: time.Now()} message := fmt.Sprintf(note, args...) 
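
Editorial aside, not part of the patch: after the revert above, the events/v1 broadcaster is driven by a stop channel rather than a context, the sink methods take no context, and NewRecorder returns a plain EventRecorder. A rough usage sketch against that older surface; the in-cluster config and the "keda-operator" reporting controller name are assumptions for illustration.

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/events"
)

func main() {
	cfg, _ := rest.InClusterConfig()
	client := kubernetes.NewForConfigOrDie(cfg)

	sink := &events.EventSinkImpl{Interface: client.EventsV1()}
	broadcaster := events.NewBroadcaster(sink)

	stopCh := make(chan struct{})
	defer close(stopCh)
	broadcaster.StartRecordingToSink(stopCh) // no context-based variant on this version
	defer broadcaster.Shutdown()

	recorder := broadcaster.NewRecorder(scheme.Scheme, "keda-operator")
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"}}

	// Eventf(regarding, related, type, reason, action, note, args...)
	recorder.Eventf(pod, nil, corev1.EventTypeNormal, "Synced", "Reconcile", "object synced")
}
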
refRegarding, err := reference.GetReference(recorder.scheme, regarding) if err != nil { - logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", regarding, err, eventtype, reason, message) return } @@ -74,11 +53,11 @@ func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Objec if related != nil { refRelated, err = reference.GetReference(recorder.scheme, related) if err != nil { - logger.V(9).Info("Could not construct reference", "object", related, "err", err) + klog.V(9).Infof("Could not construct reference to: '%#v' due to: '%v'.", related, err) } } if !util.ValidateEventType(eventtype) { - logger.Error(nil, "Unsupported event type", "eventType", eventtype) + klog.Errorf("Unsupported event type: '%v'", eventtype) return } event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action) diff --git a/vendor/k8s.io/client-go/tools/events/fake.go b/vendor/k8s.io/client-go/tools/events/fake.go index e26826d6c83..d572e0d3e17 100644 --- a/vendor/k8s.io/client-go/tools/events/fake.go +++ b/vendor/k8s.io/client-go/tools/events/fake.go @@ -20,7 +20,6 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -30,8 +29,6 @@ type FakeRecorder struct { Events chan string } -var _ EventRecorderLogger = &FakeRecorder{} - // Eventf emits an event func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { if f.Events != nil { @@ -39,10 +36,6 @@ func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, } } -func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { - return f -} - // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go index bb6109f6234..20f8ca05daa 100644 --- a/vendor/k8s.io/client-go/tools/events/interfaces.go +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -17,30 +17,39 @@ limitations under the License. package events import ( - "context" - eventsv1 "k8s.io/api/events/v1" "k8s.io/apimachinery/pkg/runtime" - internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" ) -type EventRecorder = internalevents.EventRecorder -type EventRecorderLogger = internalevents.EventRecorderLogger +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Eventf constructs an event from the given information and puts it in the queue for sending. + // 'regarding' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 
'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'action' explains what happened with regarding/what action did the ReportingController + // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) + // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). + // 'note' is intended to be human readable. + Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) +} // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartRecordingToSink starts sending events received from the specified eventBroadcaster. - // Deprecated: use StartRecordingToSinkWithContext instead. StartRecordingToSink(stopCh <-chan struct{}) - // StartRecordingToSink starts sending events received from the specified eventBroadcaster. - StartRecordingToSinkWithContext(ctx context.Context) error - // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger + NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorder // StartEventWatcher enables you to watch for emitted events without usage // of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests). @@ -50,14 +59,8 @@ type EventBroadcaster interface { // StartStructuredLogging starts sending events received from this EventBroadcaster to the structured // logging function. The return value can be ignored or used to stop recording, if desired. - // Deprecated: use StartLogging instead. StartStructuredLogging(verbosity klog.Level) func() - // StartLogging starts sending events received from this EventBroadcaster to the structured logger. - // To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). - // The returned function can be ignored or used to stop recording, if desired. - StartLogging(logger klog.Logger) (func(), error) - // Shutdown shuts down the broadcaster Shutdown() } @@ -67,9 +70,9 @@ type EventBroadcaster interface { // It is assumed that EventSink will return the same sorts of errors as // client-go's REST client. type EventSink interface { - Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) - Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) - Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) + Create(event *eventsv1.Event) (*eventsv1.Event, error) + Update(event *eventsv1.Event) (*eventsv1.Event, error) + Patch(oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error) } // EventBroadcasterAdapter is a auxiliary interface to simplify migration to @@ -82,10 +85,10 @@ type EventBroadcasterAdapter interface { StartRecordingToSink(stopCh <-chan struct{}) // NewRecorder creates a new Event Recorder with specified name. - NewRecorder(name string) EventRecorderLogger + NewRecorder(name string) EventRecorder // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. 
- DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger + DeprecatedNewLegacyRecorder(name string) record.EventRecorder // Shutdown shuts down the broadcaster. Shutdown() diff --git a/vendor/k8s.io/client-go/tools/internal/events/interfaces.go b/vendor/k8s.io/client-go/tools/internal/events/interfaces.go deleted file mode 100644 index be6261b531f..00000000000 --- a/vendor/k8s.io/client-go/tools/internal/events/interfaces.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package internal is needed to break an import cycle: record.EventRecorderAdapter -// needs this interface definition to implement it, but event.NewEventBroadcasterAdapter -// needs record.NewBroadcaster. Therefore this interface cannot be in event/interfaces.go. -package internal - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" -) - -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Eventf constructs an event from the given information and puts it in the queue for sending. - // 'regarding' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'related' is the secondary object for more complex actions. E.g. when regarding object triggers - // a creation or deletion of related object. - // 'type' of this event, and can be one of Normal, Warning. New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. - // 'action' explains what happened with regarding/what action did the ReportingController - // (ReportingController is a type of a Controller reporting an Event, e.g. k8s.io/node-controller, k8s.io/kubelet.) - // take in regarding's name; it should be in UpperCamelCase format (starting with a capital letter). - // 'note' is intended to be human readable. - Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) -} - -// EventRecorderLogger extends EventRecorder such that a logger can -// be set for methods in EventRecorder. Normally, those methods -// uses the global default logger to record errors and debug messages. -// If that is not desired, use WithLogger to provide a logger instance. -type EventRecorderLogger interface { - EventRecorder - - // WithLogger replaces the context used for logging. This is a cheap call - // and meant to be used for contextual logging: - // recorder := ... - // logger := klog.FromContext(ctx) - // recorder.WithLogger(logger).Eventf(...) 
- WithLogger(logger klog.Logger) EventRecorderLogger -} diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index d1511696d0f..f176167dc80 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -29,7 +29,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/watch" restclient "k8s.io/client-go/rest" - internalevents "k8s.io/client-go/tools/internal/events" "k8s.io/client-go/tools/record/util" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" @@ -111,21 +110,6 @@ type EventRecorder interface { AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) } -// EventRecorderLogger extends EventRecorder such that a logger can -// be set for methods in EventRecorder. Normally, those methods -// uses the global default logger to record errors and debug messages. -// If that is not desired, use WithLogger to provide a logger instance. -type EventRecorderLogger interface { - EventRecorder - - // WithLogger replaces the context used for logging. This is a cheap call - // and meant to be used for contextual logging: - // recorder := ... - // logger := klog.FromContext(ctx) - // recorder.WithLogger(logger).Eventf(...) - WithLogger(logger klog.Logger) EventRecorderLogger -} - // EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. type EventBroadcaster interface { // StartEventWatcher starts sending events received from this EventBroadcaster to the given @@ -147,7 +131,7 @@ type EventBroadcaster interface { // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder // Shutdown shuts down the broadcaster. Once the broadcaster is shut // down, it will only try to record an event in a sink once before @@ -158,14 +142,12 @@ type EventBroadcaster interface { // EventRecorderAdapter is a wrapper around a "k8s.io/client-go/tools/record".EventRecorder // implementing the new "k8s.io/client-go/tools/events".EventRecorder interface. type EventRecorderAdapter struct { - recorder EventRecorderLogger + recorder EventRecorder } -var _ internalevents.EventRecorder = &EventRecorderAdapter{} - // NewEventRecorderAdapter returns an adapter implementing the new // "k8s.io/client-go/tools/events".EventRecorder interface. -func NewEventRecorderAdapter(recorder EventRecorderLogger) *EventRecorderAdapter { +func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter { return &EventRecorderAdapter{ recorder: recorder, } @@ -176,76 +158,28 @@ func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, re a.recorder.Eventf(regarding, eventtype, reason, note, args...) } -func (a *EventRecorderAdapter) WithLogger(logger klog.Logger) internalevents.EventRecorderLogger { - return &EventRecorderAdapter{ - recorder: a.recorder.WithLogger(logger), - } -} - // Creates a new event broadcaster. 
-func NewBroadcaster(opts ...BroadcasterOption) EventBroadcaster { - c := config{ - sleepDuration: defaultSleepDuration, - } - for _, opt := range opts { - opt(&c) - } - eventBroadcaster := &eventBroadcasterImpl{ - Broadcaster: watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), - sleepDuration: c.sleepDuration, - options: c.CorrelatorOptions, - } - ctx := c.Context - if ctx == nil { - ctx = context.Background() - } else { - // Calling Shutdown is not required when a context was provided: - // when the context is canceled, this goroutine will shut down - // the broadcaster. - go func() { - <-ctx.Done() - eventBroadcaster.Broadcaster.Shutdown() - }() - } - eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(ctx) - return eventBroadcaster +func NewBroadcaster() EventBroadcaster { + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) } func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return NewBroadcaster(WithSleepDuration(sleepDuration)) + return newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration) } func NewBroadcasterWithCorrelatorOptions(options CorrelatorOptions) EventBroadcaster { - return NewBroadcaster(WithCorrelatorOptions(options)) -} - -func WithCorrelatorOptions(options CorrelatorOptions) BroadcasterOption { - return func(c *config) { - c.CorrelatorOptions = options - } -} - -// WithContext sets a context for the broadcaster. Canceling the context will -// shut down the broadcaster, Shutdown doesn't need to be called. The context -// can also be used to provide a logger. -func WithContext(ctx context.Context) BroadcasterOption { - return func(c *config) { - c.Context = ctx - } + eventBroadcaster := newEventBroadcaster(watch.NewLongQueueBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration) + eventBroadcaster.options = options + return eventBroadcaster } -func WithSleepDuration(sleepDuration time.Duration) BroadcasterOption { - return func(c *config) { - c.sleepDuration = sleepDuration +func newEventBroadcaster(broadcaster *watch.Broadcaster, sleepDuration time.Duration) *eventBroadcasterImpl { + eventBroadcaster := &eventBroadcasterImpl{ + Broadcaster: broadcaster, + sleepDuration: sleepDuration, } -} - -type BroadcasterOption func(*config) - -type config struct { - CorrelatorOptions - context.Context - sleepDuration time.Duration + eventBroadcaster.cancelationCtx, eventBroadcaster.cancel = context.WithCancel(context.Background()) + return eventBroadcaster } type eventBroadcasterImpl struct { @@ -286,12 +220,12 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } tries := 0 for { - if recordEvent(e.cancelationCtx, sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { break } tries++ if tries >= maxTriesPerEvent { - klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event) + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) break } @@ -303,7 +237,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve } select { case <-e.cancelationCtx.Done(): - klog.FromContext(e.cancelationCtx).Error(nil, "Unable to write event (broadcaster is shut down)", "event", event) + klog.Errorf("Unable to write event '%#v' 
(broadcaster is shut down)", event) return case <-time.After(delay): } @@ -314,7 +248,7 @@ func (e *eventBroadcasterImpl) recordToSink(sink EventSink, event *v1.Event, eve // was successfully recorded or discarded, false if it should be retried. // If updateExistingEvent is false, it creates a new event, otherwise it updates // existing event. -func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { +func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { var newEvent *v1.Event var err error if updateExistingEvent { @@ -337,13 +271,13 @@ func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []b switch err.(type) { case *restclient.RequestConstructionError: // We will construct the request the same next time, so don't keep trying. - klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event) + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) return true case *errors.StatusError: if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) { - klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) } else { - klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) } return true case *errors.UnexpectedObjectError: @@ -352,7 +286,7 @@ func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []b default: // This case includes actual http transport errors. Go ahead and retry. } - klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)", "event", event) + klog.Errorf("Unable to write event: '%#v': '%v'(may retry after sleeping)", event, err) return false } @@ -365,15 +299,12 @@ func (e *eventBroadcasterImpl) StartLogging(logf func(format string, args ...int }) } -// StartStructuredLogging starts sending events received from this EventBroadcaster to a structured logger. -// The logger is retrieved from a context if the broadcaster was constructed with a context, otherwise -// the global default is used. +// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. // The return value can be ignored or used to stop recording, if desired. 
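
Editorial aside, not part of the patch: the record package above goes back to an option-less NewBroadcaster and a recorder keyed by a core/v1 EventSource. A rough sketch of the classic wiring on that version; clientset construction and the component name are assumptions.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
)

func main() {
	cfg, _ := rest.InClusterConfig()
	client := kubernetes.NewForConfigOrDie(cfg)

	broadcaster := record.NewBroadcaster() // no BroadcasterOption variadics on this version
	defer broadcaster.Shutdown()

	broadcaster.StartStructuredLogging(3) // logs through the global klog logger here
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})

	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "keda-operator"})
	recorder.Event(&corev1.Pod{}, corev1.EventTypeWarning, "FailedScaling", "illustrative message")
}
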
func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watch.Interface { - loggerV := klog.FromContext(e.cancelationCtx).V(int(verbosity)) return e.StartEventWatcher( func(e *v1.Event) { - loggerV.Info("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) + klog.V(verbosity).InfoS("Event occurred", "object", klog.KRef(e.InvolvedObject.Namespace, e.InvolvedObject.Name), "fieldPath", e.InvolvedObject.FieldPath, "kind", e.InvolvedObject.Kind, "apiVersion", e.InvolvedObject.APIVersion, "type", e.Type, "reason", e.Reason, "message", e.Message) }) } @@ -382,32 +313,26 @@ func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) watc func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { watcher, err := e.Watch() if err != nil { - klog.FromContext(e.cancelationCtx).Error(err, "Unable start event watcher (will not retry!)") + klog.Errorf("Unable start event watcher: '%v' (will not retry!)", err) } go func() { defer utilruntime.HandleCrash() - for { - select { - case <-e.cancelationCtx.Done(): - watcher.Stop() - return - case watchEvent := <-watcher.ResultChan(): - event, ok := watchEvent.Object.(*v1.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue - } - eventHandler(event) + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue } + eventHandler(event) } }() return watcher } // NewRecorder returns an EventRecorder that records events with the given event source. -func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorderLogger { - return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()} +func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, e.Broadcaster, clock.RealClock{}} } type recorderImpl struct { @@ -417,17 +342,15 @@ type recorderImpl struct { clock clock.PassiveClock } -var _ EventRecorder = &recorderImpl{} - -func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.Object, annotations map[string]string, eventtype, reason, message string) { +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, eventtype, reason, message string) { ref, err := ref.GetReference(recorder.scheme, object) if err != nil { - logger.Error(err, "Could not construct reference, will not report event", "object", object, "eventType", eventtype, "reason", reason, "message", message) + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) return } if !util.ValidateEventType(eventtype) { - logger.Error(nil, "Unsupported event type", "eventType", eventtype) + klog.Errorf("Unsupported event type: '%v'", eventtype) return } @@ -444,16 +367,16 @@ func (recorder *recorderImpl) generateEvent(logger klog.Logger, object runtime.O // outgoing events anyway). 
sent, err := recorder.ActionOrDrop(watch.Added, event) if err != nil { - logger.Error(err, "Unable to record event (will not retry!)") + klog.Errorf("unable to record event: %v (will not retry!)", err) return } if !sent { - logger.Error(nil, "Unable to record event: too many queued events, dropped event", "event", event) + klog.Errorf("unable to record event: too many queued events, dropped event %#v", event) } } func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(klog.Background(), object, nil, eventtype, reason, message) + recorder.generateEvent(object, nil, eventtype, reason, message) } func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { @@ -461,7 +384,7 @@ func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, m } func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(klog.Background(), object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) + recorder.generateEvent(object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) } func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { @@ -485,26 +408,3 @@ func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map Type: eventtype, } } - -type recorderImplLogger struct { - *recorderImpl - logger klog.Logger -} - -var _ EventRecorderLogger = &recorderImplLogger{} - -func (recorder recorderImplLogger) Event(object runtime.Object, eventtype, reason, message string) { - recorder.recorderImpl.generateEvent(recorder.logger, object, nil, eventtype, reason, message) -} - -func (recorder recorderImplLogger) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder recorderImplLogger) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(recorder.logger, object, annotations, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger { - return recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger} -} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go index 67eac481712..fda4ad8ff8a 100644 --- a/vendor/k8s.io/client-go/tools/record/fake.go +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -20,7 +20,6 @@ import ( "fmt" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" ) // FakeRecorder is used as a fake during tests. It is thread safe. It is usable @@ -32,8 +31,6 @@ type FakeRecorder struct { IncludeObject bool } -var _ EventRecorderLogger = &FakeRecorder{} - func objectString(object runtime.Object, includeObject bool) string { if !includeObject { return "" @@ -71,10 +68,6 @@ func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[st f.writeEvent(object, annotations, eventtype, reason, messageFmt, args...) } -func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { - return f -} - // NewFakeRecorder creates new fake event recorder with event channel with // buffer of given size. 
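// Illustrative sketch (not part of the vendored patch): how the FakeRecorder
// above is commonly used in unit tests. Test, package, and reason names are
// hypothetical; the buffered Events channel and Eventf call are the real API.
package example_test

import (
	"strings"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

func TestEmitsEvent(t *testing.T) {
	recorder := record.NewFakeRecorder(10) // buffered channel of 10 event strings

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	recorder.Eventf(pod, corev1.EventTypeWarning, "ScaleFailed", "could not scale: %s", "timeout")

	got := <-recorder.Events // roughly "<type> <reason> <message>"
	if !strings.Contains(got, "ScaleFailed") {
		t.Fatalf("unexpected event: %q", got)
	}
}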
func NewFakeRecorder(bufferSize int) *FakeRecorder { diff --git a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go deleted file mode 100644 index 4846cdb5509..00000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "context" -) - -var _ Executor = &fallbackExecutor{} - -type fallbackExecutor struct { - primary Executor - secondary Executor - shouldFallback func(error) bool -} - -// NewFallbackExecutor creates an Executor that first attempts to use the -// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial -// websocket "StreamWithContext" call fails. -// func NewFallbackExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { -func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) { - return &fallbackExecutor{ - primary: primary, - secondary: secondary, - shouldFallback: shouldFallback, - }, nil -} - -// Stream is deprecated. Please use "StreamWithContext". -func (f *fallbackExecutor) Stream(options StreamOptions) error { - return f.StreamWithContext(context.Background(), options) -} - -// StreamWithContext initially attempts to call "StreamWithContext" using the -// primary executor, falling back to calling the secondary executor if the -// initial primary call to upgrade to a websocket connection fails. -func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { - err := f.primary.StreamWithContext(ctx, options) - if f.shouldFallback(err) { - return f.secondary.StreamWithContext(ctx, options) - } - return err -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index 1ae67729be3..662a3cb4ac7 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -18,10 +18,17 @@ package remotecommand import ( "context" + "fmt" "io" "net/http" + "net/url" + + "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" ) // StreamOptions holds information pertaining to the current streaming session: @@ -56,3 +63,120 @@ type streamCreator interface { type streamProtocolHandler interface { stream(conn streamCreator) error } + +// streamExecutor handles transporting standard shell streams over an httpstream connection. +type streamExecutor struct { + upgrader spdy.Upgrader + transport http.RoundTripper + + method string + url *url.URL + protocols []string +} + +// NewSPDYExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. 
+func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &streamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. +func (e *streamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. +func (e *streamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { + req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &http.Client{Transport: e.transport}, + req, + e.protocols..., + ) + if err != nil { + return nil, nil, err + } + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return conn, streamer, nil +} + +// StreamWithContext opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects or the context is done. 
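// Illustrative sketch (not part of the vendored patch): a typical caller of the
// SPDY executor restored above, exec-ing a command in a pod. The namespace,
// pod name, and command are hypothetical; NewSPDYExecutor, StreamWithContext,
// and PodExecOptions are the standard client-go pattern.
package main

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

func execInPod(ctx context.Context, cfg *rest.Config, namespace, pod string) error {
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").Namespace(namespace).Name(pod).SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Command: []string{"sh", "-c", "echo hello"},
			Stdout:  true,
			Stderr:  true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return err
	}
	// Streams until the command exits, the server disconnects, or ctx is done.
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}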
+func (e *streamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + conn, streamer, err := e.newConnectionAndStream(ctx, options) + if err != nil { + return err + } + defer conn.Close() + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + errorChan <- streamer.stream(conn) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/spdy.go b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go deleted file mode 100644 index c2bfcf8a654..00000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/spdy.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "context" - "fmt" - "net/http" - "net/url" - - "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport/spdy" - "k8s.io/klog/v2" -) - -// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection. -type spdyStreamExecutor struct { - upgrader spdy.Upgrader - transport http.RoundTripper - - method string - url *url.URL - protocols []string - rejectRedirects bool // if true, receiving redirect from upstream is an error -} - -// NewSPDYExecutor connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams. -func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) -} - -// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future -// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated) -// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect -// during the attempted upgrade in these "Stream" calls, an error is returned. -func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { - executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url) - if err != nil { - return nil, err - } - spdyExecutor := executor.(*spdyStreamExecutor) - spdyExecutor.rejectRedirects = true - return spdyExecutor, nil -} - -// NewSPDYExecutorForTransports connects to the provided server using the given transport, -// upgrades the response using the given upgrader to multiplexed bidirectional streams. 
-func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { - return NewSPDYExecutorForProtocols( - transport, upgrader, method, url, - remotecommand.StreamProtocolV5Name, - remotecommand.StreamProtocolV4Name, - remotecommand.StreamProtocolV3Name, - remotecommand.StreamProtocolV2Name, - remotecommand.StreamProtocolV1Name, - ) -} - -// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. -func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { - return &spdyStreamExecutor{ - upgrader: upgrader, - transport: transport, - method: method, - url: url, - protocols: protocols, - }, nil -} - -// Stream opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects. -func (e *spdyStreamExecutor) Stream(options StreamOptions) error { - return e.StreamWithContext(context.Background(), options) -} - -// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. -func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { - req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) - if err != nil { - return nil, nil, fmt.Errorf("error creating request: %v", err) - } - - client := http.Client{Transport: e.transport} - if e.rejectRedirects { - client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return fmt.Errorf("redirect not allowed") - } - } - conn, protocol, err := spdy.Negotiate( - e.upgrader, - &client, - req, - e.protocols..., - ) - if err != nil { - return nil, nil, err - } - - var streamer streamProtocolHandler - - switch protocol { - case remotecommand.StreamProtocolV5Name: - streamer = newStreamProtocolV5(options) - case remotecommand.StreamProtocolV4Name: - streamer = newStreamProtocolV4(options) - case remotecommand.StreamProtocolV3Name: - streamer = newStreamProtocolV3(options) - case remotecommand.StreamProtocolV2Name: - streamer = newStreamProtocolV2(options) - case "": - klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) - fallthrough - case remotecommand.StreamProtocolV1Name: - streamer = newStreamProtocolV1(options) - } - - return conn, streamer, nil -} - -// StreamWithContext opens a protocol streamer to the server and streams until a client closes -// the connection or the server disconnects or the context is done. 
-func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { - conn, streamer, err := e.newConnectionAndStream(ctx, options) - if err != nil { - return err - } - defer conn.Close() - - panicChan := make(chan any, 1) - errorChan := make(chan error, 1) - go func() { - defer func() { - if p := recover(); p != nil { - panicChan <- p - } - }() - errorChan <- streamer.stream(conn) - }() - - select { - case p := <-panicChan: - panic(p) - case err := <-errorChan: - return err - case <-ctx.Done(): - return ctx.Err() - } -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v5.go b/vendor/k8s.io/client-go/tools/remotecommand/v5.go deleted file mode 100644 index 4da7bfb1399..00000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/v5.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -// streamProtocolV5 add support for V5 of the remote command subprotocol. -// For the streamProtocolHandler, this version is the same as V4. -type streamProtocolV5 struct { - *streamProtocolV4 -} - -var _ streamProtocolHandler = &streamProtocolV5{} - -func newStreamProtocolV5(options StreamOptions) streamProtocolHandler { - return &streamProtocolV5{ - streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4), - } -} - -func (p *streamProtocolV5) stream(conn streamCreator) error { - return p.streamProtocolV4.stream(conn) -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go deleted file mode 100644 index 49ef4717cd9..00000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go +++ /dev/null @@ -1,519 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "sync" - "time" - - gwebsocket "github.com/gorilla/websocket" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport/websocket" - "k8s.io/klog/v2" -) - -// writeDeadline defines the time that a write to the websocket connection -// must complete by, otherwise an i/o timeout occurs. The writeDeadline -// has nothing to do with a response from the other websocket connection -// endpoint; only that the message was successfully processed by the -// local websocket connection. 
The typical write deadline within the websocket -// library is one second. -const writeDeadline = 2 * time.Second - -var ( - _ Executor = &wsStreamExecutor{} - _ streamCreator = &wsStreamCreator{} - _ httpstream.Stream = &stream{} - - streamType2streamID = map[string]byte{ - v1.StreamTypeStdin: remotecommand.StreamStdIn, - v1.StreamTypeStdout: remotecommand.StreamStdOut, - v1.StreamTypeStderr: remotecommand.StreamStdErr, - v1.StreamTypeError: remotecommand.StreamErr, - v1.StreamTypeResize: remotecommand.StreamResize, - } -) - -const ( - // pingPeriod defines how often a heartbeat "ping" message is sent. - pingPeriod = 5 * time.Second - // pingReadDeadline defines the time waiting for a response heartbeat - // "pong" message before a timeout error occurs for websocket reading. - // This duration must always be greater than the "pingPeriod". By defining - // this deadline in terms of the ping period, we are essentially saying - // we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout. - pingReadDeadline = (pingPeriod * 3) + (1 * time.Second) -) - -// wsStreamExecutor handles transporting standard shell streams over an httpstream connection. -type wsStreamExecutor struct { - transport http.RoundTripper - upgrader websocket.ConnectionHolder - method string - url string - // requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io). - protocols []string - // selected protocol from the handshake process; could be empty string if handshake fails. - negotiated string - // period defines how often a "ping" heartbeat message is sent to the other endpoint. - heartbeatPeriod time.Duration - // deadline defines the amount of time before "pong" response must be received. - heartbeatDeadline time.Duration -} - -func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) { - // Only supports V5 protocol for correct version skew functionality. - // Previous api servers will proxy upgrade requests to legacy websocket - // servers on container runtimes which support V1-V4. These legacy - // websocket servers will not handle the new CLOSE signal. - return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name) -} - -// NewWebSocketExecutorForProtocols allows to execute commands via a WebSocket connection. -func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) { - transport, upgrader, err := websocket.RoundTripperFor(config) - if err != nil { - return nil, fmt.Errorf("error creating websocket transports: %v", err) - } - return &wsStreamExecutor{ - transport: transport, - upgrader: upgrader, - method: method, - url: url, - protocols: protocols, - heartbeatPeriod: pingPeriod, - heartbeatDeadline: pingReadDeadline, - }, nil -} - -// Deprecated: use StreamWithContext instead to avoid possible resource leaks. -// See https://github.com/kubernetes/kubernetes/pull/103177 for details. -func (e *wsStreamExecutor) Stream(options StreamOptions) error { - return e.StreamWithContext(context.Background(), options) -} - -// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various -// goroutines to implement the necessary streams over the connection. The "options" parameter -// defines which streams are requested. Returns an error if one occurred. This method is NOT -// safe to run concurrently with the same executor (because of the state stored in the upgrader). 
-func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { - req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil) - if err != nil { - return err - } - conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...) - if err != nil { - return err - } - if conn == nil { - panic(fmt.Errorf("websocket connection is nil")) - } - defer conn.Close() - e.negotiated = conn.Subprotocol() - klog.V(4).Infof("The subprotocol is %s", e.negotiated) - - var streamer streamProtocolHandler - switch e.negotiated { - case remotecommand.StreamProtocolV5Name: - streamer = newStreamProtocolV5(options) - case remotecommand.StreamProtocolV4Name: - streamer = newStreamProtocolV4(options) - case remotecommand.StreamProtocolV3Name: - streamer = newStreamProtocolV3(options) - case remotecommand.StreamProtocolV2Name: - streamer = newStreamProtocolV2(options) - case "": - klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) - fallthrough - case remotecommand.StreamProtocolV1Name: - streamer = newStreamProtocolV1(options) - } - - panicChan := make(chan any, 1) - errorChan := make(chan error, 1) - go func() { - defer func() { - if p := recover(); p != nil { - panicChan <- p - } - }() - creator := newWSStreamCreator(conn) - go creator.readDemuxLoop( - e.upgrader.DataBufferSize(), - e.heartbeatPeriod, - e.heartbeatDeadline, - ) - errorChan <- streamer.stream(creator) - }() - - select { - case p := <-panicChan: - panic(p) - case err := <-errorChan: - return err - case <-ctx.Done(): - return ctx.Err() - } -} - -type wsStreamCreator struct { - conn *gwebsocket.Conn - // Protects writing to websocket connection; reading is lock-free - connWriteLock sync.Mutex - // map of stream id to stream; multiple streams read/write the connection - streams map[byte]*stream - streamsMu sync.Mutex - // setStreamErr holds the error to return to anyone calling setStreams. - // this is populated in closeAllStreamReaders - setStreamErr error -} - -func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { - return &wsStreamCreator{ - conn: conn, - streams: map[byte]*stream{}, - } -} - -func (c *wsStreamCreator) getStream(id byte) *stream { - c.streamsMu.Lock() - defer c.streamsMu.Unlock() - return c.streams[id] -} - -func (c *wsStreamCreator) setStream(id byte, s *stream) error { - c.streamsMu.Lock() - defer c.streamsMu.Unlock() - if c.setStreamErr != nil { - return c.setStreamErr - } - c.streams[id] = s - return nil -} - -// CreateStream uses id from passed headers to create a stream over "c.conn" connection. -// Returns a Stream structure or nil and an error if one occurred. -func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) { - streamType := headers.Get(v1.StreamType) - id, ok := streamType2streamID[streamType] - if !ok { - return nil, fmt.Errorf("unknown stream type: %s", streamType) - } - if s := c.getStream(id); s != nil { - return nil, fmt.Errorf("duplicate stream for type %s", streamType) - } - reader, writer := io.Pipe() - s := &stream{ - headers: headers, - readPipe: reader, - writePipe: writer, - conn: c.conn, - connWriteLock: &c.connWriteLock, - id: id, - } - if err := c.setStream(id, s); err != nil { - _ = s.writePipe.Close() - _ = s.readPipe.Close() - return nil, err - } - return s, nil -} - -// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket -// connection. 
This loop reads the connection, and demultiplexes the data -// into one of the individual stream pipes (by checking the stream id). This -// loop can *not* be run concurrently, because there can only be one websocket -// connection reader at a time (a read mutex would provide no benefit). -func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) { - // Initialize and start the ping/pong heartbeat. - h := newHeartbeat(c.conn, period, deadline) - // Set initial timeout for websocket connection reading. - if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil { - klog.Errorf("Websocket initial setting read deadline failed %v", err) - return - } - go h.start() - // Buffer size must correspond to the same size allocated - // for the read buffer during websocket client creation. A - // difference can cause incomplete connection reads. - readBuffer := make([]byte, bufferSize) - for { - // NextReader() only returns data messages (BinaryMessage or Text - // Message). Even though this call will never return control frames - // such as ping, pong, or close, this call is necessary for these - // message types to be processed. There can only be one reader - // at a time, so this reader loop must *not* be run concurrently; - // there is no lock for reading. Calling "NextReader()" before the - // current reader has been processed will close the current reader. - // If the heartbeat read deadline times out, this "NextReader()" will - // return an i/o error, and error handling will clean up. - messageType, r, err := c.conn.NextReader() - if err != nil { - websocketErr, ok := err.(*gwebsocket.CloseError) - if ok && websocketErr.Code == gwebsocket.CloseNormalClosure { - err = nil // readers will get io.EOF as it's a normal closure - } else { - err = fmt.Errorf("next reader: %w", err) - } - c.closeAllStreamReaders(err) - return - } - // All remote command protocols send/receive only binary data messages. - if messageType != gwebsocket.BinaryMessage { - c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType)) - return - } - // It's ok to read just a single byte because the underlying library wraps the actual - // connection with a buffered reader anyway. - _, err = io.ReadFull(r, readBuffer[:1]) - if err != nil { - c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err)) - return - } - streamID := readBuffer[0] - s := c.getStream(streamID) - if s == nil { - klog.Errorf("Unknown stream id %d, discarding message", streamID) - continue - } - for { - nr, errRead := r.Read(readBuffer) - if nr > 0 { - // Write the data to the stream's pipe. This can block. - _, errWrite := s.writePipe.Write(readBuffer[:nr]) - if errWrite != nil { - // Pipe must have been closed by the stream user. - // Nothing to do, discard the message. - break - } - } - if errRead != nil { - if errRead == io.EOF { - break - } - c.closeAllStreamReaders(fmt.Errorf("read message: %w", err)) - return - } - } - } -} - -// closeAllStreamReaders closes readers in all streams. -// This unblocks all stream.Read() calls, and keeps any future streams from being created. -func (c *wsStreamCreator) closeAllStreamReaders(err error) { - c.streamsMu.Lock() - defer c.streamsMu.Unlock() - for _, s := range c.streams { - // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes. 
- _ = s.writePipe.CloseWithError(err) - } - // ensure callers to setStreams receive an error after this point - if err != nil { - c.setStreamErr = err - } else { - c.setStreamErr = fmt.Errorf("closed all streams") - } -} - -type stream struct { - headers http.Header - readPipe *io.PipeReader - writePipe *io.PipeWriter - // conn is used for writing directly into the connection. - // Is nil after Close() / Reset() to prevent future writes. - conn *gwebsocket.Conn - // connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only. - // The mutex is shared across all streams because the underlying connection is shared. - connWriteLock *sync.Mutex - id byte -} - -func (s *stream) Read(p []byte) (n int, err error) { - return s.readPipe.Read(p) -} - -// Write writes directly to the underlying WebSocket connection. -func (s *stream) Write(p []byte) (n int, err error) { - klog.V(4).Infof("Write() on stream %d", s.id) - defer klog.V(4).Infof("Write() done on stream %d", s.id) - s.connWriteLock.Lock() - defer s.connWriteLock.Unlock() - if s.conn == nil { - return 0, fmt.Errorf("write on closed stream %d", s.id) - } - err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline)) - if err != nil { - klog.V(7).Infof("Websocket setting write deadline failed %v", err) - return 0, err - } - // Message writer buffers the message data, so we don't need to do that ourselves. - // Just write id and the data as two separate writes to avoid allocating an intermediate buffer. - w, err := s.conn.NextWriter(gwebsocket.BinaryMessage) - if err != nil { - return 0, err - } - defer func() { - if w != nil { - w.Close() - } - }() - _, err = w.Write([]byte{s.id}) - if err != nil { - return 0, err - } - n, err = w.Write(p) - if err != nil { - return n, err - } - err = w.Close() - w = nil - return n, err -} - -// Close half-closes the stream, indicating this side is finished with the stream. -func (s *stream) Close() error { - klog.V(4).Infof("Close() on stream %d", s.id) - defer klog.V(4).Infof("Close() done on stream %d", s.id) - s.connWriteLock.Lock() - defer s.connWriteLock.Unlock() - if s.conn == nil { - return fmt.Errorf("Close() on already closed stream %d", s.id) - } - // Communicate the CLOSE stream signal to the other websocket endpoint. - err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id}) - s.conn = nil - return err -} - -func (s *stream) Reset() error { - klog.V(4).Infof("Reset() on stream %d", s.id) - defer klog.V(4).Infof("Reset() done on stream %d", s.id) - s.Close() - return s.writePipe.Close() -} - -func (s *stream) Headers() http.Header { - return s.headers -} - -func (s *stream) Identifier() uint32 { - return uint32(s.id) -} - -// heartbeat encasulates data necessary for the websocket ping/pong heartbeat. This -// heartbeat works by setting a read deadline on the websocket connection, then -// pushing this deadline into the future for every successful heartbeat. If the -// heartbeat "pong" fails to respond within the deadline, then the "NextReader()" call -// inside the "readDemuxLoop" will return an i/o error prompting a connection close -// and cleanup. 
-type heartbeat struct { - conn *gwebsocket.Conn - // period defines how often a "ping" heartbeat message is sent to the other endpoint - period time.Duration - // closing the "closer" channel will clean up the heartbeat timers - closer chan struct{} - // optional data to send with "ping" message - message []byte - // optionally received data message with "pong" message, same as sent with ping - pongMessage []byte -} - -// newHeartbeat creates heartbeat structure encapsulating fields necessary to -// run the websocket connection ping/pong mechanism and sets up handlers on -// the websocket connection. -func newHeartbeat(conn *gwebsocket.Conn, period time.Duration, deadline time.Duration) *heartbeat { - h := &heartbeat{ - conn: conn, - period: period, - closer: make(chan struct{}), - } - // Set up handler for receiving returned "pong" message from other endpoint - // by pushing the read deadline into the future. The "msg" received could - // be empty. - h.conn.SetPongHandler(func(msg string) error { - // Push the read deadline into the future. - klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg) - err := h.conn.SetReadDeadline(time.Now().Add(deadline)) - if err != nil { - klog.Errorf("Websocket setting read deadline failed %v", err) - return err - } - if len(msg) > 0 { - h.pongMessage = []byte(msg) - } - return nil - }) - // Set up handler to cleanup timers when this endpoint receives "Close" message. - closeHandler := h.conn.CloseHandler() - h.conn.SetCloseHandler(func(code int, text string) error { - close(h.closer) - return closeHandler(code, text) - }) - return h -} - -// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC -// this data sent with "ping" message should be returned in "pong" message. -func (h *heartbeat) setMessage(msg string) { - h.message = []byte(msg) -} - -// start the heartbeat by setting up necesssary handlers and looping by sending "ping" -// message every "period" until the "closer" channel is closed. -func (h *heartbeat) start() { - // Loop to continually send "ping" message through websocket connection every "period". - t := time.NewTicker(h.period) - defer t.Stop() - for { - select { - case <-h.closer: - klog.V(8).Infof("closed channel--returning") - return - case <-t.C: - // "WriteControl" does not need to be protected by a mutex. According to - // gorilla/websockets library docs: "The Close and WriteControl methods can - // be called concurrently with all other methods." - if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil { - klog.V(8).Infof("Websocket Ping succeeeded") - } else { - klog.Errorf("Websocket Ping failed: %v", err) - if errors.Is(err, gwebsocket.ErrCloseSent) { - // we continue because c.conn.CloseChan will manage closing the connection already - continue - } else if e, ok := err.(net.Error); ok && e.Timeout() { - // Continue, in case this is a transient failure. - // c.conn.CloseChan above will tell us when the connection is - // actually closed. - // If Temporary function hadn't been deprecated, we would have used it. - // But most of temporary errors are timeout errors anyway. 
- continue - } - return - } - } - } -} diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go index 9fddc6c5f23..f50b68e5ffb 100644 --- a/vendor/k8s.io/client-go/transport/spdy/spdy.go +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -43,15 +43,11 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if config.Proxy != nil { proxy = config.Proxy } - upgradeRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ - TLS: tlsConfig, - Proxier: proxy, - PingPeriod: time.Second * 5, - UpgradeTransport: nil, + upgradeRoundTripper := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, }) - if err != nil { - return nil, nil, err - } wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err != nil { return nil, nil, err diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 4770331a0e1..78060719a98 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -96,32 +96,6 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCA() { - /* - kubernetes mutual (2-way) x509 between client and apiserver: - - 1. apiserver sending its apiserver certificate along with its publickey to client - >2. client verifies the apiserver certificate sent against its cluster certificate authority data - 3. client sending its client certificate along with its public key to the apiserver - 4. apiserver verifies the client certificate sent against its cluster certificate authority data - - description: - here, with this block, - cluster certificate authority data gets loaded into TLS before the handshake process - for client to later during the handshake verify the apiserver certificate - - normal args related to this stage: - --certificate-authority='': - Path to a cert file for the certificate authority - - (retrievable from "kubectl options" command) - (suggested by @deads2k) - - see also: - - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go - - for the step 3, see: a few lines below in this file - - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go - */ - rootCAs, err := rootCertPool(c.TLS.CAData) if err != nil { return nil, fmt.Errorf("unable to load root certificates: %w", err) @@ -147,35 +121,6 @@ func TLSConfigFor(c *Config) (*tls.Config, error) { } if c.HasCertAuth() || c.HasCertCallback() { - - /* - kubernetes mutual (2-way) x509 between client and apiserver: - - 1. apiserver sending its apiserver certificate along with its publickey to client - 2. client verifies the apiserver certificate sent against its cluster certificate authority data - >3. client sending its client certificate along with its public key to the apiserver - 4. 
apiserver verifies the client certificate sent against its cluster certificate authority data - - description: - here, with this callback function, - client certificate and pub key get loaded into TLS during the handshake process - for apiserver to later in the step 4 verify the client certificate - - normal args related to this stage: - --client-certificate='': - Path to a client certificate file for TLS - --client-key='': - Path to a client key file for TLS - - (retrievable from "kubectl options" command) - (suggested by @deads2k) - - see also: - - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go - - for the step 2, see: a few lines above in this file - - for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go - */ - tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { // Note: static key/cert data always take precedence over cert // callback. diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go deleted file mode 100644 index 010f916bc7b..00000000000 --- a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2023 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package websocket - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - - gwebsocket "github.com/gorilla/websocket" - - "k8s.io/apimachinery/pkg/util/httpstream" - utilnet "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport" -) - -var ( - _ utilnet.TLSClientConfigHolder = &RoundTripper{} - _ http.RoundTripper = &RoundTripper{} -) - -// ConnectionHolder defines functions for structure providing -// access to the websocket connection. -type ConnectionHolder interface { - DataBufferSize() int - Connection() *gwebsocket.Conn -} - -// RoundTripper knows how to establish a connection to a remote WebSocket endpoint and make it available for use. -// RoundTripper must not be reused. -type RoundTripper struct { - // TLSConfig holds the TLS configuration settings to use when connecting - // to the remote server. - TLSConfig *tls.Config - - // Proxier specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxier func(req *http.Request) (*url.URL, error) - - // Conn holds the WebSocket connection after a round trip. - Conn *gwebsocket.Conn -} - -// Connection returns the stored websocket connection. -func (rt *RoundTripper) Connection() *gwebsocket.Conn { - return rt.Conn -} - -// DataBufferSize returns the size of buffers for the -// websocket connection. -func (rt *RoundTripper) DataBufferSize() int { - return 32 * 1024 -} - -// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder. 
-func (rt *RoundTripper) TLSClientConfig() *tls.Config { - return rt.TLSConfig -} - -// RoundTrip connects to the remote websocket using the headers in the request and the TLS -// configuration from the config -func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response, retErr error) { - defer func() { - if request.Body != nil { - err := request.Body.Close() - if retErr == nil { - retErr = err - } - } - }() - - // set the protocol version directly on the dialer from the header - protocolVersions := request.Header[httpstream.HeaderProtocolVersion] - delete(request.Header, httpstream.HeaderProtocolVersion) - - dialer := gwebsocket.Dialer{ - Proxy: rt.Proxier, - TLSClientConfig: rt.TLSConfig, - Subprotocols: protocolVersions, - ReadBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for - WriteBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for - } - switch request.URL.Scheme { - case "https": - request.URL.Scheme = "wss" - case "http": - request.URL.Scheme = "ws" - default: - return nil, fmt.Errorf("unknown url scheme: %s", request.URL.Scheme) - } - wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) - if err != nil { - return nil, &httpstream.UpgradeFailureError{Cause: err} - } - - rt.Conn = wsConn - - return resp, nil -} - -// RoundTripperFor transforms the passed rest config into a wrapped roundtripper, as well -// as a pointer to the websocket RoundTripper. The websocket RoundTripper contains the -// websocket connection after RoundTrip() on the wrapper. Returns an error if there is -// a problem creating the round trippers. -func RoundTripperFor(config *restclient.Config) (http.RoundTripper, ConnectionHolder, error) { - transportCfg, err := config.TransportConfig() - if err != nil { - return nil, nil, err - } - tlsConfig, err := transport.TLSConfigFor(transportCfg) - if err != nil { - return nil, nil, err - } - proxy := config.Proxy - if proxy == nil { - proxy = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) - } - - upgradeRoundTripper := &RoundTripper{ - TLSConfig: tlsConfig, - Proxier: proxy, - } - wrapper, err := transport.HTTPWrappersForConfig(transportCfg, upgradeRoundTripper) - if err != nil { - return nil, nil, err - } - return wrapper, upgradeRoundTripper, nil -} - -// Negotiate opens a connection to a remote server and attempts to negotiate -// a WebSocket connection. Upon success, it returns the negotiated connection. -// The round tripper rt must use the WebSocket round tripper wsRt - see RoundTripperFor. 
-func Negotiate(rt http.RoundTripper, connectionInfo ConnectionHolder, req *http.Request, protocols ...string) (*gwebsocket.Conn, error) { - req.Header[httpstream.HeaderProtocolVersion] = protocols - resp, err := rt.RoundTrip(req) - if err != nil { - return nil, err - } - err = resp.Body.Close() - if err != nil { - connectionInfo.Connection().Close() - return nil, fmt.Errorf("error closing response body: %v", err) - } - return connectionInfo.Connection(), nil -} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index a363d1afb4f..380c0645526 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -238,12 +238,8 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. func (q *Type) ShutDown() { - q.cond.L.Lock() - defer q.cond.L.Unlock() - - q.drain = false - q.shuttingDown = true - q.cond.Broadcast() + q.setDrain(false) + q.shutdown() } // ShutDownWithDrain will cause q to ignore all new items added to it. As soon @@ -256,16 +252,53 @@ func (q *Type) ShutDown() { // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. func (q *Type) ShutDownWithDrain() { + q.setDrain(true) + q.shutdown() + for q.isProcessing() && q.shouldDrain() { + q.waitForProcessing() + } +} + +// isProcessing indicates if there are still items on the work queue being +// processed. It's used to drain the work queue on an eventual shutdown. +func (q *Type) isProcessing() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return q.processing.len() != 0 +} + +// waitForProcessing waits for the worker goroutines to finish processing items +// and call Done on them. +func (q *Type) waitForProcessing() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + // Ensure that we do not wait on a queue which is already empty, as that + // could result in waiting for Done to be called on items in an empty queue + // which has already been shut down, which will result in waiting + // indefinitely. + if q.processing.len() == 0 { + return + } + q.cond.Wait() +} + +func (q *Type) setDrain(shouldDrain bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.drain = shouldDrain +} + +func (q *Type) shouldDrain() bool { q.cond.L.Lock() defer q.cond.L.Unlock() + return q.drain +} - q.drain = true +func (q *Type) shutdown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() q.shuttingDown = true q.cond.Broadcast() - - for q.processing.len() != 0 && q.drain { - q.cond.Wait() - } } func (q *Type) ShuttingDown() bool { diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index 28b829cc139..dce920ad199 100644 --- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -77,7 +77,7 @@ func genStatus(t *types.Type) bool { // hasObjectMeta returns true if the type has a ObjectMeta field. 
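// Illustrative sketch (not part of the vendored patch): the drain semantics of
// the workqueue changes in the queue.go hunk above. ShutDownWithDrain blocks
// until every item already handed to a worker has been marked Done, while
// ShutDown returns immediately. Item and worker details are hypothetical.
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	q.Add("item-1")

	go func() {
		for {
			item, shutdown := q.Get()
			if shutdown {
				return
			}
			fmt.Println("processing", item)
			q.Done(item) // required, otherwise ShutDownWithDrain would block forever
		}
	}()

	q.ShutDownWithDrain() // waits for in-flight items, then shuts the queue down
}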
func hasObjectMeta(t *types.Type) bool { for _, m := range t.Members { - if m.Embedded && m.Name == "ObjectMeta" { + if m.Embedded == true && m.Name == "ObjectMeta" { return true } } diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go index 4875393913b..f8ee7d07090 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/factory.go @@ -75,7 +75,6 @@ func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w i } m := map[string]interface{}{ "cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer), - "cacheTransformFunc": c.Universe.Type(cacheTransformFunc), "groupVersions": g.groupVersions, "gvInterfaces": gvInterfaces, "gvNewFuncs": gvNewFuncs, @@ -110,7 +109,6 @@ type sharedInformerFactory struct { lock {{.syncMutex|raw}} defaultResync {{.timeDuration|raw}} customResync map[{{.reflectType|raw}}]{{.timeDuration|raw}} - transform {{.cacheTransformFunc|raw}} informers map[{{.reflectType|raw}}]{{.cacheSharedIndexInformer|raw}} // startedInformers is used for tracking which informers have been started. @@ -149,14 +147,6 @@ func WithNamespace(namespace string) SharedInformerOption { } } -// WithTransform sets a transform on all informers. -func WithTransform(transform {{.cacheTransformFunc|raw}}) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.transform = transform - return factory - } -} - // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client {{.clientSetInterface|raw}}, defaultResync {{.timeDuration|raw}}) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -262,11 +252,11 @@ func (f *sharedInformerFactory) InformerFor(obj {{.runtimeObject|raw}}, newFunc } informer = newFunc(f.client, resyncPeriod) - informer.SetTransform(f.transform) f.informers[informerType] = informer return informer } + ` var sharedInformerFactoryInterface = ` diff --git a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go index fc1f7786f66..27d4bd51ab1 100644 --- a/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go +++ b/vendor/k8s.io/code-generator/cmd/informer-gen/generators/types.go @@ -28,7 +28,6 @@ var ( cacheNewGenericLister = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewGenericLister"} cacheNewSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "NewSharedIndexInformer"} cacheSharedIndexInformer = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "SharedIndexInformer"} - cacheTransformFunc = types.Name{Package: "k8s.io/client-go/tools/cache", Name: "TransformFunc"} listOptions = types.Name{Package: "k8s.io/kubernetes/pkg/apis/core", Name: "ListOptions"} reflectType = types.Name{Package: "reflect", Name: "Type"} runtimeObject = types.Name{Package: "k8s.io/apimachinery/pkg/runtime", Name: "Object"} diff --git a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go index fa8e3f1c356..242eb3aa10a 100644 --- a/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/vendor/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -93,7 +93,7 @@ func Packages(context 
*generator.Context, arguments *args.GeneratorArgs) generat for _, t := range pkg.Types { klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { - if typeMember.Name == "TypeMeta" && typeMember.Embedded { + if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { typesToRegister = append(typesToRegister, t) } } diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100644 new mode 100755 index 415b0b67c64..75001bae3fa --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -126,9 +126,6 @@ for GVs in ${GROUPS_WITH_VERSIONS}; do done done -CLIENTSET_PKG="${CLIENTSET_PKG_NAME:-clientset}" -CLIENTSET_NAME="${CLIENTSET_NAME_VERSIONED:-versioned}" - if grep -qw "deepcopy" <<<"${GENS}"; then if [ ! "$verify_only" ]; then # Nuke existing files @@ -204,6 +201,9 @@ if grep -qw "applyconfiguration" <<<"${GENS}"; then fi if grep -qw "client" <<<"${GENS}"; then + CLIENTSET_PKG="${CLIENTSET_PKG_NAME:-clientset}" + CLIENTSET_NAME="${CLIENTSET_NAME_VERSIONED:-versioned}" + if [ ! "$verify_only" ]; then # Nuke existing files root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/${CLIENTSET_PKG}/${CLIENTSET_NAME}" 2>/dev/null || true)" diff --git a/vendor/k8s.io/code-generator/kube_codegen.sh b/vendor/k8s.io/code-generator/kube_codegen.sh index 6ded2048368..3342b9dcaeb 100644 --- a/vendor/k8s.io/code-generator/kube_codegen.sh +++ b/vendor/k8s.io/code-generator/kube_codegen.sh @@ -50,16 +50,11 @@ function kube::codegen::internal::git_grep() { # --boilerplate # An optional override for the header file to insert into generated files. # -# --extra-peer-dir -# An optional list (this flag may be specified multiple times) of "extra" -# directories to consider during conversion generation. 
-# function kube::codegen::gen_helpers() { local in_pkg_root="" local out_base="" # gengo needs the output dir must be $out_base/$out_pkg_root local boilerplate="${KUBE_CODEGEN_ROOT}/hack/boilerplate.go.txt" local v="${KUBE_VERBOSE:-0}" - local extra_peers=() while [ "$#" -gt 0 ]; do case "$1" in @@ -75,10 +70,6 @@ function kube::codegen::gen_helpers() { boilerplate="$2" shift 2 ;; - "--extra-peer-dir") - extra_peers+=("$2") - shift 2 - ;; *) echo "unknown argument: $1" >&2 return 1 @@ -137,16 +128,16 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.deepcopy.go' \ | xargs -0 rm -f - local input_args=() + local inputs=() for arg in "${input_pkgs[@]}"; do - input_args+=("--input-dirs" "$arg") + inputs+=("--input-dirs" "$arg") done "${gobin}/deepcopy-gen" \ -v "${v}" \ -O zz_generated.deepcopy \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${input_args[@]}" + "${inputs[@]}" fi # Defaults @@ -171,16 +162,16 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.defaults.go' \ | xargs -0 rm -f - local input_args=() + local inputs=() for arg in "${input_pkgs[@]}"; do - input_args+=("--input-dirs" "$arg") + inputs+=("--input-dirs" "$arg") done "${gobin}/defaulter-gen" \ -v "${v}" \ -O zz_generated.defaults \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${input_args[@]}" + "${inputs[@]}" fi # Conversions @@ -205,21 +196,16 @@ function kube::codegen::gen_helpers() { ":(glob)${root}"/'**/zz_generated.conversion.go' \ | xargs -0 rm -f - local input_args=() + local inputs=() for arg in "${input_pkgs[@]}"; do - input_args+=("--input-dirs" "$arg") - done - local extra_peer_args=() - for arg in "${extra_peers[@]:+"${extra_peers[@]}"}"; do - extra_peer_args+=("--extra-peer-dirs" "$arg") + inputs+=("--input-dirs" "$arg") done "${gobin}/conversion-gen" \ -v "${v}" \ -O zz_generated.conversion \ --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ - "${extra_peer_args[@]:+"${extra_peer_args[@]}"}" \ - "${input_args[@]}" + "${inputs[@]}" fi } diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go index 27a57eb7f8b..48d3093e0cd 100644 --- a/vendor/k8s.io/component-base/metrics/buckets.go +++ b/vendor/k8s.io/component-base/metrics/buckets.go @@ -33,16 +33,6 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { return prometheus.ExponentialBuckets(start, factor, count) } -// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is -// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. 
-func ExponentialBucketsRange(min, max float64, count int) []float64 { - return prometheus.ExponentialBucketsRange(min, max, count) -} - // MergeBuckets merges buckets together func MergeBuckets(buckets ...[]float64) []float64 { result := make([]float64, 1) diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go index d68a98c44a1..3b22d21ef25 100644 --- a/vendor/k8s.io/component-base/metrics/metric.go +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -166,7 +166,7 @@ func (r *lazyMetric) Create(version *semver.Version) bool { if deprecatedV != nil { dv = deprecatedV.String() } - registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc() + registeredMetrics.WithLabelValues(string(sl), dv).Inc() return r.IsCreated() } diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go index 2c72cb48fd6..7a59b7ba169 100644 --- a/vendor/k8s.io/component-base/metrics/options.go +++ b/vendor/k8s.io/component-base/metrics/options.go @@ -31,7 +31,6 @@ type Options struct { ShowHiddenMetricsForVersion string DisabledMetrics []string AllowListMapping map[string]string - AllowListMappingManifest string } // NewOptions returns default metrics options @@ -41,10 +40,6 @@ func NewOptions() *Options { // Validate validates metrics flags options. func (o *Options) Validate() []error { - if o == nil { - return nil - } - var errs []error err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion) if err != nil { @@ -82,10 +77,6 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "The map from metric-label to value allow-list of this label. The key's format is ,. "+ "The value's format is ,..."+ "e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.") - fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest, - "The path to the manifest file that contains the allow-list mapping. "+ - "The format of the file is the same as the flag --allow-metric-labels. "+ - "Note that the flag --allow-metric-labels will override the manifest file.") } // Apply applies parameters into global configuration of metrics. 
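
// Illustrative sketch (editor's addition, not part of the patch): the --allow-metric-labels
// help string in the AddFlags hunk above appears to have lost its placeholder tokens during
// extraction. Going by the "e.g. metric1,label1='v1,v2,v3'" examples above and the
// validateAllowMetricLabel regex further below, keys take the form "metricName,labelName"
// and values are a comma-separated list of allowed label values. A minimal, hypothetical
// use of the API these hunks keep:

package main

import "k8s.io/component-base/metrics"

func main() {
	// Keys follow "metricName,labelName"; values list the allowed label values,
	// mirroring the flag example metric1,label1='v1,v2,v3'.
	metrics.SetLabelAllowListFromCLI(map[string]string{
		"metric1,label1": "v1,v2,v3",
		"metric2,label1": "v1,v2,v3",
	})
	// Any label value outside the allow-list is rewritten to "unexpected" by
	// ConstrainToAllowedList / ConstrainLabelMap (see the opts.go hunks below).
}
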
@@ -102,8 +93,6 @@ func (o *Options) Apply() { } if o.AllowListMapping != nil { SetLabelAllowListFromCLI(o.AllowListMapping) - } else if len(o.AllowListMappingManifest) > 0 { - SetLabelAllowListFromManifest(o.AllowListMappingManifest) } } @@ -129,7 +118,7 @@ func validateAllowMetricLabel(allowListMapping map[string]string) error { for k := range allowListMapping { reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex) if reg.FindString(k) != k { - return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") + return fmt.Errorf("--allow-metric-labels must has a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`") } } return nil diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go index 30dfd2e3dcc..49d2d40bbf7 100644 --- a/vendor/k8s.io/component-base/metrics/opts.go +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -18,18 +18,13 @@ package metrics import ( "fmt" - "os" - "path/filepath" "strings" "sync" "time" "github.com/prometheus/client_golang/prometheus" - "gopkg.in/yaml.v2" - "k8s.io/apimachinery/pkg/util/sets" promext "k8s.io/component-base/metrics/prometheusextension" - "k8s.io/klog/v2" ) var ( @@ -324,7 +319,6 @@ func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, lab if allowValues, ok := allowList.labelToAllowList[name]; ok { if !allowValues.Has(value) { labelValueList[index] = "unexpected" - cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() } } } @@ -335,7 +329,6 @@ func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]strin if allowValues, ok := allowList.labelToAllowList[name]; ok { if !allowValues.Has(value) { labels[name] = "unexpected" - cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() } } } @@ -361,20 +354,3 @@ func SetLabelAllowListFromCLI(allowListMapping map[string]string) { } } } - -func SetLabelAllowListFromManifest(manifest string) { - allowListLock.Lock() - defer allowListLock.Unlock() - allowListMapping := make(map[string]string) - data, err := os.ReadFile(filepath.Clean(manifest)) - if err != nil { - klog.Errorf("Failed to read allow list manifest: %v", err) - return - } - err = yaml.Unmarshal(data, &allowListMapping) - if err != nil { - klog.Errorf("Failed to parse allow list manifest: %v", err) - return - } - SetLabelAllowListFromCLI(allowListMapping) -} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 39cd2ba2885..fbfb3f5ea1c 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -37,7 +37,7 @@ var ( Namespace: "kubernetes", Name: "healthcheck", Help: "This metric records the result of a single healthcheck.", - StabilityLevel: k8smetrics.STABLE, + StabilityLevel: k8smetrics.BETA, }, []string{"name", "type"}, ) @@ -48,7 +48,7 @@ var ( Namespace: "kubernetes", Name: "healthchecks_total", Help: "This metric records the results of all healthcheck.", - StabilityLevel: k8smetrics.STABLE, + StabilityLevel: k8smetrics.BETA, }, []string{"name", "type", "status"}, ) diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go index 203813e8143..1942f9958d2 100644 --- a/vendor/k8s.io/component-base/metrics/registry.go +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -37,7 +37,7 @@ var ( 
registriesLock sync.RWMutex disabledMetrics = map[string]struct{}{} - registeredMetricsTotal = NewCounterVec( + registeredMetrics = NewCounterVec( &CounterOpts{ Name: "registered_metrics_total", Help: "The count of registered metrics broken by stability level and deprecation version.", @@ -61,14 +61,6 @@ var ( StabilityLevel: BETA, }, ) - - cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter( - &CounterOpts{ - Name: "cardinality_enforcement_unexpected_categorizations_total", - Help: "The count of unexpected categorizations during cardinality enforcement.", - StabilityLevel: ALPHA, - }, - ) ) // shouldHide be used to check if a specific metric with deprecated version should be hidden @@ -387,8 +379,7 @@ func NewKubeRegistry() KubeRegistry { } func (r *kubeRegistry) RegisterMetaMetrics() { - r.MustRegister(registeredMetricsTotal) + r.MustRegister(registeredMetrics) r.MustRegister(disabledMetricsTotal) r.MustRegister(hiddenMetricsTotal) - r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal) } diff --git a/vendor/k8s.io/component-base/tracing/tracing.go b/vendor/k8s.io/component-base/tracing/tracing.go index bdf6f377dde..50894eb3b9b 100644 --- a/vendor/k8s.io/component-base/tracing/tracing.go +++ b/vendor/k8s.io/component-base/tracing/tracing.go @@ -68,12 +68,6 @@ func (s *Span) End(logThreshold time.Duration) { } } -// RecordError will record err as an exception span event for this span. -// If this span is not being recorded or err is nil then this method does nothing. -func (s *Span) RecordError(err error, attributes ...attribute.KeyValue) { - s.otelSpan.RecordError(err, trace.WithAttributes(attributes...)) -} - func attributesToFields(attributes []attribute.KeyValue) []utiltrace.Field { fields := make([]utiltrace.Field, len(attributes)) for i := range attributes { diff --git a/vendor/k8s.io/component-base/tracing/utils.go b/vendor/k8s.io/component-base/tracing/utils.go index 72c8cf23e8a..ae894a8091c 100644 --- a/vendor/k8s.io/component-base/tracing/utils.go +++ b/vendor/k8s.io/component-base/tracing/utils.go @@ -25,7 +25,6 @@ import ( "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" oteltrace "go.opentelemetry.io/otel/trace" "k8s.io/client-go/transport" @@ -96,17 +95,9 @@ func WithTracing(handler http.Handler, tp oteltrace.TracerProvider, serviceName otelhttp.WithPropagators(Propagators()), otelhttp.WithTracerProvider(tp), } - wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Add the http.target attribute to the otelhttp span - // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 - if r.URL != nil { - oteltrace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) - } - handler.ServeHTTP(w, r) - }) // With Noop TracerProvider, the otelhttp still handles context propagation. // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough - return otelhttp.NewHandler(wrappedHandler, serviceName, opts...) + return otelhttp.NewHandler(handler, serviceName, opts...) } // WrapperFor can be used to add tracing to a *rest.Config. 
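
// Illustrative sketch (editor's addition, not part of the patch): wiring up the simplified
// WithTracing kept by the tracing/utils.go hunk above. Its return type is cut off in the
// hunk header and is assumed here to be http.Handler; as the retained comment notes, a
// no-op TracerProvider still gives context propagation via otelhttp.

package main

import (
	"net/http"

	oteltrace "go.opentelemetry.io/otel/trace"
	"k8s.io/component-base/tracing"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Incoming trace context is still propagated even though no spans are exported.
	handler := tracing.WithTracing(mux, oteltrace.NewNoopTracerProvider(), "example-server")
	_ = http.ListenAndServe(":8080", handler)
}
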
diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go index 081dae306f1..3a8d765f11a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go @@ -156,9 +156,7 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str } r := &spec3.RequestBody{ RequestBodyProps: spec3.RequestBodyProps{ - Content: map[string]*spec3.MediaType{}, - Description: param.Description(), - Required: param.Required(), + Content: map[string]*spec3.MediaType{}, }, } for _, consume := range consumes { @@ -174,9 +172,9 @@ func (o *openAPI) buildRequestBody(parameters []common.Parameter, consumes []str return nil, nil } -func newOpenAPI(config *common.OpenAPIV3Config) openAPI { +func newOpenAPI(config *common.Config) openAPI { o := openAPI{ - config: config, + config: common.ConvertConfigToV3(config), spec: &spec3.OpenAPI{ Version: "3.0.0", Info: config.Info, @@ -315,27 +313,24 @@ func (o *openAPI) buildOpenAPISpec(webServices []common.RouteContainer) error { // BuildOpenAPISpec builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. // // Deprecated: BuildOpenAPISpecFromRoutes should be used instead. -func BuildOpenAPISpec(webServices []*restful.WebService, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpec(webServices []*restful.WebService, config *common.Config) (*spec3.OpenAPI, error) { return BuildOpenAPISpecFromRoutes(restfuladapter.AdaptWebServices(webServices), config) } // BuildOpenAPISpecFromRoutes builds OpenAPI v3 spec given a list of route containers and common.Config to customize it. -func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.OpenAPIV3Config) (*spec3.OpenAPI, error) { +func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *common.Config) (*spec3.OpenAPI, error) { a := newOpenAPI(config) err := a.buildOpenAPISpec(webServices) if err != nil { return nil, err } - if config.PostProcessSpec != nil { - return config.PostProcessSpec(a.spec) - } return a.spec, nil } // BuildOpenAPIDefinitionsForResource builds a partial OpenAPI spec given a sample object and common.Config to customize it. // BuildOpenAPIDefinitionsForResources returns the OpenAPI spec which includes the definitions for the // passed type names. -func BuildOpenAPIDefinitionsForResources(config *common.OpenAPIV3Config, names ...string) (map[string]*spec.Schema, error) { +func BuildOpenAPIDefinitionsForResources(config *common.Config, names ...string) (map[string]*spec.Schema, error) { o := newOpenAPI(config) // We can discard the return value of toSchema because all we care about is the side effect of calling it. // All the models created for this resource get added to o.swagger.Definitions diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index a66fe8a0958..76415b7830b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package cached provides a cache mechanism based on etags to lazily +// Package cache provides a cache mechanism based on etags to lazily // build, and/or cache results from expensive operation such that those // operations are not repeated unnecessarily. 
The operations can be // created as a tree, and replaced dynamically as needed. @@ -25,18 +25,16 @@ limitations under the License. // // This package uses a source/transform/sink model of caches to build // the dependency tree, and can be used as follows: -// - [Func]: A source cache that recomputes the content every time. -// - [Once]: A source cache that always produces the +// - [NewSource]: A source cache that recomputes the content every time. +// - [NewStaticSource]: A source cache that always produces the // same content, it is only called once. -// - [Transform]: A cache that transforms data from one format to +// - [NewTransformer]: A cache that transforms data from one format to // another. It's only refreshed when the source changes. -// - [Merge]: A cache that aggregates multiple caches in a map into one. +// - [NewMerger]: A cache that aggregates multiple caches into one. // It's only refreshed when the source changes. -// - [MergeList]: A cache that aggregates multiple caches in a list into one. -// It's only refreshed when the source changes. -// - [Atomic]: A cache adapter that atomically replaces the source with a new one. -// - [LastSuccess]: A cache adapter that caches the last successful and returns -// it if the next call fails. It extends [Atomic]. +// - [Replaceable]: A cache adapter that can be atomically +// replaced with a new one, and saves the previous results in case an +// error pops-up. // // # Etags // @@ -56,45 +54,61 @@ import ( "sync/atomic" ) -// Value is wrapping a value behind a getter for lazy evaluation. -type Value[T any] interface { - Get() (value T, etag string, err error) -} - -// Result is wrapping T and error into a struct for cases where a tuple is more -// convenient or necessary in Golang. +// Result is the content returned from a call to a cache. It can either +// be created with [NewResultOK] if the call was a success, or +// [NewResultErr] if the call resulted in an error. type Result[T any] struct { - Value T - Etag string - Err error + Data T + Etag string + Err error } -func (r Result[T]) Get() (T, string, error) { - return r.Value, r.Etag, r.Err +// NewResultOK creates a new [Result] for a successful operation. +func NewResultOK[T any](data T, etag string) Result[T] { + return Result[T]{ + Data: data, + Etag: etag, + } } -// Func wraps a (thread-safe) function as a Value[T]. -func Func[T any](fn func() (T, string, error)) Value[T] { - return valueFunc[T](fn) +// NewResultErr creates a new [Result] when an error has happened. +func NewResultErr[T any](err error) Result[T] { + return Result[T]{ + Err: err, + } } -type valueFunc[T any] func() (T, string, error) - -func (c valueFunc[T]) Get() (T, string, error) { - return c() +// Result can be treated as a [Data] if necessary. +func (r Result[T]) Get() Result[T] { + return r } -// Static returns constant values. -func Static[T any](value T, etag string) Value[T] { - return Result[T]{Value: value, Etag: etag} +// Data is a cache that performs an action whose result data will be +// cached. It also returns an "etag" identifier to version the cache, so +// that the caller can know if they have the most recent version of the +// cache (and can decide to cache some operation based on that). +// +// The [NewMerger] and [NewTransformer] automatically handle +// that for you by checking if the etag is updated before calling the +// merging or transforming function. 
+type Data[T any] interface { + // Returns the cached data, as well as an "etag" to identify the + // version of the cache, or an error if something happened. + Get() Result[T] } -// Merge merges a of cached values. The merge function only gets called if any of -// the dependency has changed. +// NewMerger creates a new merge cache, a cache that merges the result +// of other caches. The function only gets called if any of the +// dependency has changed. // // If any of the dependency returned an error before, or any of the // dependency returned an error this time, or if the mergeFn failed -// before, then the function is run again. +// before, then the function is reran. +// +// The caches and results are mapped by K so that associated data can be +// retrieved. The map of dependencies can not be modified after +// creation, and a new merger should be created (and probably replaced +// using a [Replaceable]). // // Note that this assumes there is no "partial" merge, the merge // function will remerge all the dependencies together everytime. Since @@ -104,19 +118,18 @@ func Static[T any](value T, etag string) Value[T] { // Also note that Golang map iteration is not stable. If the mergeFn // depends on the order iteration to be stable, it will need to // implement its own sorting or iteration order. -func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { - list := make([]Value[T], 0, len(caches)) - - // map from index to key +func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { + listCaches := make([]Data[T], 0, len(caches)) + // maps from index to key indexes := make(map[int]K, len(caches)) i := 0 for k := range caches { - list = append(list, caches[k]) + listCaches = append(listCaches, caches[k]) indexes[i] = k i++ } - return MergeList(func(results []Result[T]) (V, string, error) { + return NewListMerger(func(results []Result[T]) Result[V] { if len(results) != len(indexes) { panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) } @@ -125,11 +138,20 @@ func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, str m[indexes[i]] = results[i] } return mergeFn(m) - }, list) + }, listCaches) +} + +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) Result[V] + caches []Data[T] + cacheResults []Result[T] + result Result[V] } -// MergeList merges a list of cached values. The function only gets called if -// any of the dependency has changed. +// NewListMerger creates a new merge cache that merges the results of +// other caches in list form. The function only gets called if any of +// the dependency has changed. // // The benefit of ListMerger over the basic Merger is that caches are // stored in an ordered list so the order of the cache will be @@ -143,37 +165,31 @@ func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, str // function will remerge all the dependencies together everytime. Since // the list of dependencies is constant, there is no way to save some // partial merge information either. 
-func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { +func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] { return &listMerger[T, V]{ - mergeFn: mergeFn, - delegates: delegates, + mergeFn: mergeFn, + caches: caches, } } -type listMerger[T, V any] struct { - lock sync.Mutex - mergeFn func([]Result[T]) (V, string, error) - delegates []Value[T] - cache []Result[T] - result Result[V] -} - func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { - cacheResults := make([]Result[T], len(c.delegates)) + cacheResults := make([]Result[T], len(c.caches)) ch := make(chan struct { int Result[T] - }, len(c.delegates)) - for i := range c.delegates { + }, len(c.caches)) + for i := range c.caches { go func(index int) { - value, etag, err := c.delegates[index].Get() ch <- struct { int Result[T] - }{index, Result[T]{Value: value, Etag: etag, Err: err}} + }{ + index, + c.caches[index].Get(), + } }(i) } - for i := 0; i < len(c.delegates); i++ { + for i := 0; i < len(c.caches); i++ { res := <-ch cacheResults[res.int] = res.Result } @@ -181,16 +197,16 @@ func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { } func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { - if c.cache == nil { + if c.cacheResults == nil { return true } if c.result.Err != nil { return true } - if len(results) != len(c.cache) { - panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) + if len(results) != len(c.cacheResults) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) } - for i, oldResult := range c.cache { + for i, oldResult := range c.cacheResults { newResult := results[i] if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { return true @@ -199,92 +215,98 @@ func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { return false } -func (c *listMerger[T, V]) Get() (V, string, error) { +func (c *listMerger[T, V]) Get() Result[V] { c.lock.Lock() defer c.lock.Unlock() cacheResults := c.prepareResultsLocked() if c.needsRunningLocked(cacheResults) { - c.cache = cacheResults - c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) + c.cacheResults = cacheResults + c.result = c.mergeFn(c.cacheResults) } - return c.result.Value, c.result.Etag, c.result.Err + return c.result } -// Transform the result of another cached value. The transformFn will only be called -// if the source has updated, otherwise, the result will be returned. +// NewTransformer creates a new cache that transforms the result of +// another cache. The transformFn will only be called if the source +// cache has updated the output, otherwise, the cached result will be +// returned. // // If the dependency returned an error before, or it returns an error // this time, or if the transformerFn failed before, the function is // reran. 
-func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { - return MergeList(func(delegates []Result[T]) (V, string, error) { - if len(delegates) != 1 { - panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) +func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { + return NewListMerger(func(caches []Result[T]) Result[V] { + if len(caches) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) } - return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) - }, []Value[T]{source}) + return transformerFn(caches[0]) + }, []Data[T]{source}) +} + +// NewSource creates a new cache that generates some data. This +// will always be called since we don't know the origin of the data and +// if it needs to be updated or not. sourceFn MUST be thread-safe. +func NewSource[T any](sourceFn func() Result[T]) Data[T] { + c := source[T](sourceFn) + return &c } -// Once calls Value[T].Get() lazily and only once, even in case of an error result. -func Once[T any](d Value[T]) Value[T] { - return &once[T]{ - data: d, +type source[T any] func() Result[T] + +func (c *source[T]) Get() Result[T] { + return (*c)() +} + +// NewStaticSource creates a new cache that always generates the +// same data. This will only be called once (lazily). +func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { + return &static[T]{ + fn: staticFn, } } -type once[T any] struct { +type static[T any] struct { once sync.Once - data Value[T] + fn func() Result[T] result Result[T] } -func (c *once[T]) Get() (T, string, error) { +func (c *static[T]) Get() Result[T] { c.once.Do(func() { - c.result.Value, c.result.Etag, c.result.Err = c.data.Get() + c.result = c.fn() }) - return c.result.Value, c.result.Etag, c.result.Err -} - -// Replaceable extends the Value[T] interface with the ability to change the -// underlying Value[T] after construction. -type Replaceable[T any] interface { - Value[T] - Store(Value[T]) + return c.result } -// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements -// Replaceable[T]. -type Atomic[T any] struct { - value atomic.Pointer[Value[T]] +// Replaceable is a cache that carries the result even when the cache is +// replaced. This is the type that should typically be stored in +// structs. +type Replaceable[T any] struct { + cache atomic.Pointer[Data[T]] + result atomic.Pointer[Result[T]] } -var _ Replaceable[[]byte] = &Atomic[[]byte]{} - -func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } -func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } - -// LastSuccess calls Value[T].Get(), but hides errors by returning the last -// success if there has been any. -type LastSuccess[T any] struct { - Atomic[T] - success atomic.Pointer[Result[T]] -} - -var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} - -func (c *LastSuccess[T]) Get() (T, string, error) { - success := c.success.Load() - value, etag, err := c.Atomic.Get() - if err == nil { - if success == nil { - c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) +// Get retrieves the data from the underlying source. [Replaceable] +// implements the [Data] interface itself. This is a pass-through +// that calls the most recent underlying cache. If the cache fails but +// previously had returned a success, that success will be returned +// instead. 
If the cache fails but we never returned a success, that +// failure is returned. +func (c *Replaceable[T]) Get() Result[T] { + result := (*c.cache.Load()).Get() + + for { + cResult := c.result.Load() + if result.Err != nil && cResult != nil && cResult.Err == nil { + return *cResult + } + if c.result.CompareAndSwap(cResult, &result) { + return result } - return value, etag, err - } - - if success != nil { - return success.Value, success.Etag, success.Err } +} - return value, etag, err +// Replace changes the cache. +func (c *Replaceable[T]) Replace(cache Data[T]) { + c.cache.Swap(&cache) } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index e4ce843b0c9..1a6c12e17a5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -22,6 +22,7 @@ import ( "github.com/emicklei/go-restful/v3" + "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -164,9 +165,6 @@ type OpenAPIV3Config struct { // It is an optional function to customize model names. GetDefinitionName func(name string) (string, spec.Extensions) - // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. - PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error) - // SecuritySchemes is list of all security schemes for OpenAPI service. SecuritySchemes spec3.SecuritySchemes @@ -174,6 +172,43 @@ type OpenAPIV3Config struct { DefaultSecurity []map[string][]string } +// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object +func ConvertConfigToV3(config *Config) *OpenAPIV3Config { + if config == nil { + return nil + } + + v3Config := &OpenAPIV3Config{ + Info: config.Info, + IgnorePrefixes: config.IgnorePrefixes, + GetDefinitions: config.GetDefinitions, + GetOperationIDAndTags: config.GetOperationIDAndTags, + GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, + GetDefinitionName: config.GetDefinitionName, + Definitions: config.Definitions, + SecuritySchemes: make(spec3.SecuritySchemes), + DefaultSecurity: config.DefaultSecurity, + DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), + + CommonResponses: make(map[int]*spec3.Response), + ResponseDefinitions: make(map[string]*spec3.Response), + } + + if config.SecurityDefinitions != nil { + for s, securityScheme := range *config.SecurityDefinitions { + v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) + } + } + for k, commonResponse := range config.CommonResponses { + v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) + } + + for k, responseDefinition := range config.ResponseDefinitions { + v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) + } + return v3Config +} + type typeInfo struct { name string format string diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/markers.go b/vendor/k8s.io/kube-openapi/pkg/generators/markers.go deleted file mode 100644 index 2236ea13dfc..00000000000 --- a/vendor/k8s.io/kube-openapi/pkg/generators/markers.go +++ /dev/null @@ -1,573 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generators - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - defaultergen "k8s.io/gengo/examples/defaulter-gen/generators" - "k8s.io/gengo/types" - openapi "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -type CELTag struct { - Rule string `json:"rule,omitempty"` - Message string `json:"message,omitempty"` - MessageExpression string `json:"messageExpression,omitempty"` - OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"` -} - -func (c *CELTag) Validate() error { - if c == nil || *c == (CELTag{}) { - return fmt.Errorf("empty CEL tag is not allowed") - } - - var errs []error - if c.Rule == "" { - errs = append(errs, fmt.Errorf("rule cannot be empty")) - } - if c.Message == "" && c.MessageExpression == "" { - errs = append(errs, fmt.Errorf("message or messageExpression must be set")) - } - if c.Message != "" && c.MessageExpression != "" { - errs = append(errs, fmt.Errorf("message and messageExpression cannot be set at the same time")) - } - - if len(errs) > 0 { - return errors.Join(errs...) - } - - return nil -} - -// CommentTags represents the parsed comment tags for a given type. These types are then used to generate schema validations. -type CommentTags struct { - spec.SchemaProps - - CEL []CELTag `json:"cel,omitempty"` - - // Future markers can all be parsed into this centralized struct... - // Optional bool `json:"optional,omitempty"` - // Default any `json:"default,omitempty"` -} - -// validates the parameters in a CommentTags instance. Returns any errors encountered. 
-func (c CommentTags) Validate() error { - - var err error - - if c.MinLength != nil && *c.MinLength < 0 { - err = errors.Join(err, fmt.Errorf("minLength cannot be negative")) - } - if c.MaxLength != nil && *c.MaxLength < 0 { - err = errors.Join(err, fmt.Errorf("maxLength cannot be negative")) - } - if c.MinItems != nil && *c.MinItems < 0 { - err = errors.Join(err, fmt.Errorf("minItems cannot be negative")) - } - if c.MaxItems != nil && *c.MaxItems < 0 { - err = errors.Join(err, fmt.Errorf("maxItems cannot be negative")) - } - if c.MinProperties != nil && *c.MinProperties < 0 { - err = errors.Join(err, fmt.Errorf("minProperties cannot be negative")) - } - if c.MaxProperties != nil && *c.MaxProperties < 0 { - err = errors.Join(err, fmt.Errorf("maxProperties cannot be negative")) - } - if c.Minimum != nil && c.Maximum != nil && *c.Minimum > *c.Maximum { - err = errors.Join(err, fmt.Errorf("minimum %f is greater than maximum %f", *c.Minimum, *c.Maximum)) - } - if (c.ExclusiveMinimum || c.ExclusiveMaximum) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum { - err = errors.Join(err, fmt.Errorf("exclusiveMinimum/Maximum cannot be set when minimum == maximum")) - } - if c.MinLength != nil && c.MaxLength != nil && *c.MinLength > *c.MaxLength { - err = errors.Join(err, fmt.Errorf("minLength %d is greater than maxLength %d", *c.MinLength, *c.MaxLength)) - } - if c.MinItems != nil && c.MaxItems != nil && *c.MinItems > *c.MaxItems { - err = errors.Join(err, fmt.Errorf("minItems %d is greater than maxItems %d", *c.MinItems, *c.MaxItems)) - } - if c.MinProperties != nil && c.MaxProperties != nil && *c.MinProperties > *c.MaxProperties { - err = errors.Join(err, fmt.Errorf("minProperties %d is greater than maxProperties %d", *c.MinProperties, *c.MaxProperties)) - } - if c.Pattern != "" { - _, e := regexp.Compile(c.Pattern) - if e != nil { - err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", c.Pattern, e)) - } - } - if c.MultipleOf != nil && *c.MultipleOf == 0 { - err = errors.Join(err, fmt.Errorf("multipleOf cannot be 0")) - } - - for i, celTag := range c.CEL { - celError := celTag.Validate() - if celError == nil { - continue - } - err = errors.Join(err, fmt.Errorf("invalid CEL tag at index %d: %w", i, celError)) - } - - return err -} - -// Performs type-specific validation for CommentTags porameters. Accepts a Type instance and returns any errors encountered during validation. 
-func (c CommentTags) ValidateType(t *types.Type) error { - var err error - - resolvedType := resolveAliasAndPtrType(t) - typeString, _ := openapi.OpenAPITypeFormat(resolvedType.String()) // will be empty for complicated types - isNoValidate := resolvedType.Kind == types.Interface || resolvedType.Kind == types.Struct - - if !isNoValidate { - - isArray := resolvedType.Kind == types.Slice || resolvedType.Kind == types.Array - isMap := resolvedType.Kind == types.Map - isString := typeString == "string" - isInt := typeString == "integer" - isFloat := typeString == "number" - - if c.MaxItems != nil && !isArray { - err = errors.Join(err, fmt.Errorf("maxItems can only be used on array types")) - } - if c.MinItems != nil && !isArray { - err = errors.Join(err, fmt.Errorf("minItems can only be used on array types")) - } - if c.UniqueItems && !isArray { - err = errors.Join(err, fmt.Errorf("uniqueItems can only be used on array types")) - } - if c.MaxProperties != nil && !isMap { - err = errors.Join(err, fmt.Errorf("maxProperties can only be used on map types")) - } - if c.MinProperties != nil && !isMap { - err = errors.Join(err, fmt.Errorf("minProperties can only be used on map types")) - } - if c.MinLength != nil && !isString { - err = errors.Join(err, fmt.Errorf("minLength can only be used on string types")) - } - if c.MaxLength != nil && !isString { - err = errors.Join(err, fmt.Errorf("maxLength can only be used on string types")) - } - if c.Pattern != "" && !isString { - err = errors.Join(err, fmt.Errorf("pattern can only be used on string types")) - } - if c.Minimum != nil && !isInt && !isFloat { - err = errors.Join(err, fmt.Errorf("minimum can only be used on numeric types")) - } - if c.Maximum != nil && !isInt && !isFloat { - err = errors.Join(err, fmt.Errorf("maximum can only be used on numeric types")) - } - if c.MultipleOf != nil && !isInt && !isFloat { - err = errors.Join(err, fmt.Errorf("multipleOf can only be used on numeric types")) - } - if c.ExclusiveMinimum && !isInt && !isFloat { - err = errors.Join(err, fmt.Errorf("exclusiveMinimum can only be used on numeric types")) - } - if c.ExclusiveMaximum && !isInt && !isFloat { - err = errors.Join(err, fmt.Errorf("exclusiveMaximum can only be used on numeric types")) - } - } - - return err -} - -// Parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result. -// Accepts an optional type to validate against, and a prefix to filter out markers not related to validation. -// Accepts a prefix to filter out markers not related to validation. -// Returns any errors encountered while parsing or validating the comment tags. -func ParseCommentTags(t *types.Type, comments []string, prefix string) (CommentTags, error) { - - markers, err := parseMarkers(comments, prefix) - if err != nil { - return CommentTags{}, fmt.Errorf("failed to parse marker comments: %w", err) - } - nested, err := nestMarkers(markers) - if err != nil { - return CommentTags{}, fmt.Errorf("invalid marker comments: %w", err) - } - - // Parse the map into a CommentTags type by marshalling and unmarshalling - // as JSON in leiu of an unstructured converter. 
- out, err := json.Marshal(nested) - if err != nil { - return CommentTags{}, fmt.Errorf("failed to marshal marker comments: %w", err) - } - - var commentTags CommentTags - if err = json.Unmarshal(out, &commentTags); err != nil { - return CommentTags{}, fmt.Errorf("failed to unmarshal marker comments: %w", err) - } - - // Validate the parsed comment tags - validationErrors := commentTags.Validate() - - if t != nil { - validationErrors = errors.Join(validationErrors, commentTags.ValidateType(t)) - } - - if validationErrors != nil { - return CommentTags{}, fmt.Errorf("invalid marker comments: %w", validationErrors) - } - - return commentTags, nil -} - -var ( - allowedKeyCharacterSet = `[:_a-zA-Z0-9\[\]\-]` - valueEmpty = regexp.MustCompile(fmt.Sprintf(`^(%s*)$`, allowedKeyCharacterSet)) - valueAssign = regexp.MustCompile(fmt.Sprintf(`^(%s*)=(.*)$`, allowedKeyCharacterSet)) - valueRawString = regexp.MustCompile(fmt.Sprintf(`^(%s*)>(.*)$`, allowedKeyCharacterSet)) -) - -// extractCommentTags parses comments for lines of the form: -// -// 'marker' + "key=value" -// -// or to specify truthy boolean keys: -// -// 'marker' + "key" -// -// Values are optional; "" is the default. A tag can be specified more than -// one time and all values are returned. Returns a map with an entry for -// for each key and a value. -// -// Similar to version from gengo, but this version support only allows one -// value per key (preferring explicit array indices), supports raw strings -// with concatenation, and limits the usable characters allowed in a key -// (for simpler parsing). -// -// Assignments and empty values have the same syntax as from gengo. Raw strings -// have the syntax: -// -// 'marker' + "key>value" -// 'marker' + "key>value" -// -// Successive usages of the same raw string key results in concatenating each -// line with `\n` in between. It is an error to use `=` to assing to a previously -// assigned key -// (in contrast to types.ExtractCommentTags which allows array-typed -// values to be specified using `=`). -func extractCommentTags(marker string, lines []string) (map[string]string, error) { - out := map[string]string{} - - // Used to track the the line immediately prior to the one being iterated. - // If there was an invalid or ignored line, these values get reset. - lastKey := "" - lastIndex := -1 - lastArrayKey := "" - - var lintErrors []error - - for _, line := range lines { - line = strings.Trim(line, " ") - - // Track the current value of the last vars to use in this loop iteration - // before they are reset for the next iteration. - previousKey := lastKey - previousArrayKey := lastArrayKey - previousIndex := lastIndex - - // Make sure last vars gets reset if we `continue` - lastIndex = -1 - lastArrayKey = "" - lastKey = "" - - if len(line) == 0 { - continue - } else if !strings.HasPrefix(line, marker) { - continue - } - - line = strings.TrimPrefix(line, marker) - - key := "" - value := "" - - if matches := valueAssign.FindStringSubmatch(line); matches != nil { - key = matches[1] - value = matches[2] - - // If key exists, throw error. - // Some of the old kube open-api gen marker comments like - // `+listMapKeys` allowed a list to be specified by writing key=value - // multiple times. - // - // This is not longer supported for the prefixed marker comments. - // This is to prevent confusion with the new array syntax which - // supports lists of objects. - // - // The old marker comments like +listMapKeys will remain functional, - // but new markers will not support it. 
- if _, ok := out[key]; ok { - return nil, fmt.Errorf("cannot have multiple values for key '%v'", key) - } - - } else if matches := valueEmpty.FindStringSubmatch(line); matches != nil { - key = matches[1] - value = "" - - } else if matches := valueRawString.FindStringSubmatch(line); matches != nil { - toAdd := strings.Trim(string(matches[2]), " ") - - key = matches[1] - - // First usage as a raw string. - if existing, exists := out[key]; !exists { - - // Encode the raw string as JSON to ensure that it is properly escaped. - valueBytes, err := json.Marshal(toAdd) - if err != nil { - return nil, fmt.Errorf("invalid value for key %v: %w", key, err) - } - - value = string(valueBytes) - } else if key != previousKey { - // Successive usages of the same key of a raw string must be - // consecutive - return nil, fmt.Errorf("concatenations to key '%s' must be consecutive with its assignment", key) - } else { - // If it is a consecutive repeat usage, concatenate to the - // existing value. - // - // Decode JSON string, append to it, re-encode JSON string. - // Kinda janky but this is a code-generator... - var unmarshalled string - if err := json.Unmarshal([]byte(existing), &unmarshalled); err != nil { - return nil, fmt.Errorf("invalid value for key %v: %w", key, err) - } else { - unmarshalled += "\n" + toAdd - valueBytes, err := json.Marshal(unmarshalled) - if err != nil { - return nil, fmt.Errorf("invalid value for key %v: %w", key, err) - } - - value = string(valueBytes) - } - } - } else { - // Comment has the correct prefix, but incorrect syntax, so it is an - // error - return nil, fmt.Errorf("invalid marker comment does not match expected `+key=` pattern: %v", line) - } - - out[key] = value - lastKey = key - - // Lint the array subscript for common mistakes. This only lints the last - // array index used, (since we do not have a need for nested arrays yet - // in markers) - if arrayPath, index, hasSubscript, err := extractArraySubscript(key); hasSubscript { - // If index is non-zero, check that that previous line was for the same - // key and either the same or previous index - if err != nil { - lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: expected integer index in key '%v'", line, key)) - } else if previousArrayKey != arrayPath && index != 0 { - lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath)) - } else if index != previousIndex+1 && index != previousIndex { - lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath)) - } - - lastIndex = index - lastArrayKey = arrayPath - } - } - - if len(lintErrors) > 0 { - return nil, errors.Join(lintErrors...) - } - - return out, nil -} - -// Extracts and parses the given marker comments into a map of key -> value. -// Accepts a prefix to filter out markers not related to validation. -// The prefix is removed from the key in the returned map. -// Empty keys and invalid values will return errors, refs are currently unsupported and will be skipped. 
-func parseMarkers(markerComments []string, prefix string) (map[string]any, error) { - markers, err := extractCommentTags(prefix, markerComments) - if err != nil { - return nil, err - } - - // Parse the values as JSON - result := map[string]any{} - for key, value := range markers { - var unmarshalled interface{} - - if len(key) == 0 { - return nil, fmt.Errorf("cannot have empty key for marker comment") - } else if _, ok := defaultergen.ParseSymbolReference(value, ""); ok { - // Skip ref markers - continue - } else if len(value) == 0 { - // Empty value means key is implicitly a bool - result[key] = true - } else if err := json.Unmarshal([]byte(value), &unmarshalled); err != nil { - // Not valid JSON, throw error - return nil, fmt.Errorf("failed to parse value for key %v as JSON: %w", key, err) - } else { - // Is is valid JSON, use as a JSON value - result[key] = unmarshalled - } - } - return result, nil -} - -// Converts a map of: -// -// "a:b:c": 1 -// "a:b:d": 2 -// "a:e": 3 -// "f": 4 -// -// Into: -// -// map[string]any{ -// "a": map[string]any{ -// "b": map[string]any{ -// "c": 1, -// "d": 2, -// }, -// "e": 3, -// }, -// "f": 4, -// } -// -// Returns a list of joined errors for any invalid keys. See putNestedValue for more details. -func nestMarkers(markers map[string]any) (map[string]any, error) { - nested := make(map[string]any) - var errs []error - for key, value := range markers { - var err error - keys := strings.Split(key, ":") - - if err = putNestedValue(nested, keys, value); err != nil { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return nil, errors.Join(errs...) - } - - return nested, nil -} - -// Recursively puts a value into the given keypath, creating intermediate maps -// and slices as needed. If a key is of the form `foo[bar]`, then bar will be -// treated as an index into the array foo. If bar is not a valid integer, putNestedValue returns an error. -func putNestedValue(m map[string]any, k []string, v any) error { - if len(k) == 0 { - return nil - } - - key := k[0] - rest := k[1:] - - // Array case - if arrayKeyWithoutSubscript, index, hasSubscript, err := extractArraySubscript(key); err != nil { - return fmt.Errorf("error parsing subscript for key %v: %w", key, err) - } else if hasSubscript { - key = arrayKeyWithoutSubscript - var arrayDestination []any - if existing, ok := m[key]; !ok { - arrayDestination = make([]any, index+1) - } else if existing, ok := existing.([]any); !ok { - // Error case. Existing isn't of correct type. Can happen if - // someone is subscripting a field that was previously not an array - return fmt.Errorf("expected []any at key %v, got %T", key, existing) - } else if index >= len(existing) { - // Ensure array is big enough - arrayDestination = append(existing, make([]any, index-len(existing)+1)...) - } else { - arrayDestination = existing - } - - m[key] = arrayDestination - if arrayDestination[index] == nil { - // Doesn't exist case, create the destination. - // Assumes the destination is a map for now. Theoretically could be - // extended to support arrays of arrays, but that's not needed yet. - destination := make(map[string]any) - arrayDestination[index] = destination - if err = putNestedValue(destination, rest, v); err != nil { - return err - } - } else if dst, ok := arrayDestination[index].(map[string]any); ok { - // Already exists case, correct type - if putNestedValue(dst, rest, v); err != nil { - return err - } - } else { - // Already exists, incorrect type. Error - // This shouldn't be possible. 
- return fmt.Errorf("expected map at %v[%v], got %T", key, index, arrayDestination[index]) - } - - return nil - } else if len(rest) == 0 { - // Base case. Single key. Just set into destination - m[key] = v - return nil - } - - if existing, ok := m[key]; !ok { - destination := make(map[string]any) - m[key] = destination - return putNestedValue(destination, rest, v) - } else if destination, ok := existing.(map[string]any); ok { - return putNestedValue(destination, rest, v) - } else { - // Error case. Existing isn't of correct type. Can happen if prior comment - // referred to value as an error - return fmt.Errorf("expected map[string]any at key %v, got %T", key, existing) - } -} - -// extractArraySubscript extracts the left array subscript from a key of -// the form `foo[bar][baz]` -> "bar". -// Returns the key without the subscript, the index, and a bool indicating if -// the key had a subscript. -// If the key has a subscript, but the subscript is not a valid integer, returns an error. -// -// This can be adapted to support multidimensional subscripts probably fairly -// easily by retuning a list of ints -func extractArraySubscript(str string) (string, int, bool, error) { - subscriptIdx := strings.Index(str, "[") - if subscriptIdx == -1 { - return "", -1, false, nil - } - - subscript := strings.Split(str[subscriptIdx+1:], "]")[0] - if len(subscript) == 0 { - return "", -1, false, fmt.Errorf("empty subscript not allowed") - } - - index, err := strconv.Atoi(subscript) - if err != nil { - return "", -1, false, fmt.Errorf("expected integer index in key %v", str) - } else if index < 0 { - return "", -1, false, fmt.Errorf("subscript '%v' is invalid. index must be positive", subscript) - } - - return str[:subscriptIdx], index, true, nil -} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 259f21dbc0a..4654bbe9c7c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -26,19 +26,16 @@ import ( "sort" "strings" - defaultergen "k8s.io/gengo/examples/defaulter-gen/generators" "k8s.io/gengo/generator" "k8s.io/gengo/namer" "k8s.io/gengo/types" openapi "k8s.io/kube-openapi/pkg/common" - "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/klog/v2" ) // This is the comment tag that carries parameters for open API generation. 
const tagName = "k8s:openapi-gen" -const markerPrefix = "+k8s:validation:" const tagOptional = "optional" const tagDefault = "default" @@ -123,7 +120,7 @@ func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generat DefaultGen: generator.DefaultGen{ OptionalName: sanitizedName, }, - imports: generator.NewImportTrackerForPackage(targetPackage), + imports: generator.NewImportTracker(), targetPackage: targetPackage, } } @@ -355,75 +352,10 @@ func (g openAPITypeWriter) generateCall(t *types.Type) error { return g.Error() } -func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error { - - if vs == nil { - return nil - } - args := generator.Args{ - "ptrTo": &types.Type{ - Name: types.Name{ - Package: "k8s.io/utils/ptr", - Name: "To", - }}, - "spec": vs, - } - if vs.Minimum != nil { - g.Do("Minimum: $.ptrTo|raw$[float64]($.spec.Minimum$),\n", args) - } - if vs.Maximum != nil { - g.Do("Maximum: $.ptrTo|raw$[float64]($.spec.Maximum$),\n", args) - } - if vs.ExclusiveMinimum { - g.Do("ExclusiveMinimum: true,\n", args) - } - if vs.ExclusiveMaximum { - g.Do("ExclusiveMaximum: true,\n", args) - } - if vs.MinLength != nil { - g.Do("MinLength: $.ptrTo|raw$[int64]($.spec.MinLength$),\n", args) - } - if vs.MaxLength != nil { - g.Do("MaxLength: $.ptrTo|raw$[int64]($.spec.MaxLength$),\n", args) - } - - if vs.MinProperties != nil { - g.Do("MinProperties: $.ptrTo|raw$[int64]($.spec.MinProperties$),\n", args) - } - if vs.MaxProperties != nil { - g.Do("MaxProperties: $.ptrTo|raw$[int64]($.spec.MaxProperties$),\n", args) - } - if len(vs.Pattern) > 0 { - p, err := json.Marshal(vs.Pattern) - if err != nil { - return err - } - g.Do("Pattern: $.$,\n", string(p)) - } - if vs.MultipleOf != nil { - g.Do("MultipleOf: $.ptrTo|raw$[float64]($.spec.MultipleOf$),\n", args) - } - if vs.MinItems != nil { - g.Do("MinItems: $.ptrTo|raw$[int64]($.spec.MinItems$),\n", args) - } - if vs.MaxItems != nil { - g.Do("MaxItems: $.ptrTo|raw$[int64]($.spec.MaxItems$),\n", args) - } - if vs.UniqueItems { - g.Do("UniqueItems: true,\n", nil) - } - return nil -} - func (g openAPITypeWriter) generate(t *types.Type) error { // Only generate for struct type and ignore the rest switch t.Kind { case types.Struct: - overrides, err := ParseCommentTags(t, t.CommentLines, markerPrefix) - if err != nil { - return err - } - hasV2Definition := hasOpenAPIDefinitionMethod(t) hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t) hasV3OneOfTypes := hasOpenAPIV3OneOfMethod(t) @@ -443,17 +375,10 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } - g.Do("},\n", nil) - if err := g.generateStructExtensions(t, overrides); err != nil { - return err - } - g.Do("},\n", nil) - g.Do("})\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "})\n}\n\n", args) return nil case hasV2DefinitionTypeAndFormat && hasV3OneOfTypes: // generate v3 def. 
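
// Illustrative sketch (editor's addition, not part of the patch): the kind of hand-written
// methods the generator branches above and below key off (hasOpenAPIDefinitionMethods). A
// type that declares its own OpenAPI type/format gets the plain Type/Format schema emitted
// by these cases instead of a generated struct schema. The type below is hypothetical; the
// method names and the +k8s:openapi-gen tag come from this file.

package example

// +k8s:openapi-gen=true
type Duration struct {
	value string
}

func (Duration) OpenAPISchemaType() []string { return []string{"string"} }
func (Duration) OpenAPISchemaFormat() string { return "duration" }
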
@@ -462,34 +387,20 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("OneOf:common.GenerateOpenAPIV3OneOfSchema($.type|raw${}.OpenAPIV3OneOfTypes()),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } - g.Do("},\n", nil) - if err := g.generateStructExtensions(t, overrides); err != nil { - return err - } - g.Do("},\n", nil) - g.Do("},", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "},", args) // generate v2 def. g.Do("$.OpenAPIDefinition|raw${\n"+ "Schema: spec.Schema{\n"+ "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } - g.Do("},\n", nil) - if err := g.generateStructExtensions(t, overrides); err != nil { - return err - } - g.Do("},\n", nil) - g.Do("})\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "})\n}\n\n", args) return nil case hasV2DefinitionTypeAndFormat: g.Do("return $.OpenAPIDefinition|raw${\n"+ @@ -497,30 +408,18 @@ func (g openAPITypeWriter) generate(t *types.Type) error { "SchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+ - "Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args) - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } - g.Do("},\n", nil) - if err := g.generateStructExtensions(t, overrides); err != nil { - return err - } - g.Do("},\n", nil) - g.Do("}\n}\n\n", args) + "Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+ + "},\n"+ + "},\n"+ + "}\n}\n\n", args) return nil case hasV3OneOfTypes: // having v3 oneOf types without custom v2 type or format does not make sense. return fmt.Errorf("type %q has v3 one of types but not v2 type or format", t.Name) } - g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args) g.generateDescription(t.CommentLines) g.Do("Type: []string{\"object\"},\n", nil) - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } // write members into a temporary buffer, in order to postpone writing out the Properties field. We only do // that if it is not empty. @@ -541,7 +440,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error { g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\"")) } g.Do("},\n", nil) - if err := g.generateStructExtensions(t, overrides); err != nil { + if err := g.generateStructExtensions(t); err != nil { return err } g.Do("},\n", nil) @@ -574,7 +473,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error { return nil } -func (g openAPITypeWriter) generateStructExtensions(t *types.Type, tags CommentTags) error { +func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error { extensions, errors := parseExtensions(t.CommentLines) // Initially, we will only log struct extension errors. if len(errors) > 0 { @@ -590,11 +489,11 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type, tags CommentT } // TODO(seans3): Validate struct extensions here. 
- g.emitExtensions(extensions, unions, tags.CEL) + g.emitExtensions(extensions, unions) return nil } -func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type, tags CommentTags) error { +func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error { extensions, parseErrors := parseExtensions(m.CommentLines) validationErrors := validateMemberExtensions(extensions, m) errors := append(parseErrors, validationErrors...) @@ -605,13 +504,13 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ klog.V(2).Infof("%s %s\n", errorPrefix, e) } } - g.emitExtensions(extensions, nil, tags.CEL) + g.emitExtensions(extensions, nil) return nil } -func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union, celRules []CELTag) { +func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) { // If any extensions exist, then emit code to create them. - if len(extensions) == 0 && len(unions) == 0 && len(celRules) == 0 { + if len(extensions) == 0 && len(unions) == 0 { return } g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil) @@ -634,37 +533,6 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union } g.Do("},\n", nil) } - - if len(celRules) > 0 { - g.Do("\"x-kubernetes-validations\": []interface{}{\n", nil) - for _, rule := range celRules { - g.Do("map[string]interface{}{\n", nil) - - g.Do("\"rule\": $.$,\n", fmt.Sprintf("%#v", rule.Rule)) - - if len(rule.Message) > 0 { - g.Do("\"message\": $.$,\n", fmt.Sprintf("%#v", rule.Message)) - } - - if len(rule.MessageExpression) > 0 { - g.Do("\"messageExpression\": $.$,\n", fmt.Sprintf("%#v", rule.MessageExpression)) - } - - if rule.OptionalOldSelf != nil && *rule.OptionalOldSelf { - g.Do("\"optionalOldSelf\": $.ptrTo|raw$[bool](true),\n", generator.Args{ - "ptrTo": &types.Type{ - Name: types.Name{ - Package: "k8s.io/utils/ptr", - Name: "To", - }}, - }) - } - - g.Do("},\n", nil) - } - g.Do("},\n", nil) - } - g.Do("},\n},\n", nil) } @@ -685,83 +553,23 @@ func (g openAPITypeWriter) validatePatchTags(m *types.Member, parent *types.Type return nil } -func defaultFromComments(comments []string, commentPath string, t *types.Type) (interface{}, *types.Name, error) { - var tag string - - for { - var err error - tag, err = getSingleTagsValue(comments, tagDefault) - if err != nil { - return nil, nil, err - } - - if t == nil || len(tag) > 0 { - break - } - - comments = t.CommentLines - commentPath = t.Name.Package - switch t.Kind { - case types.Pointer: - t = t.Elem - case types.Alias: - t = t.Underlying - default: - t = nil - } - } - +func defaultFromComments(comments []string) (interface{}, error) { + tag, err := getSingleTagsValue(comments, tagDefault) if tag == "" { - return nil, nil, nil + return nil, err } - var i interface{} - if id, ok := defaultergen.ParseSymbolReference(tag, commentPath); ok { - klog.Errorf("%v, %v", id, commentPath) - return nil, &id, nil - } else if err := json.Unmarshal([]byte(tag), &i); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal default: %v", err) - } - return i, nil, nil -} - -func implementsCustomUnmarshalling(t *types.Type) bool { - switch t.Kind { - case types.Pointer: - unmarshaller, isUnmarshaller := t.Elem.Methods["UnmarshalJSON"] - return isUnmarshaller && unmarshaller.Signature.Receiver.Kind == types.Pointer - case types.Struct: - _, isUnmarshaller := t.Methods["UnmarshalJSON"] - return isUnmarshaller - default: - return 
false + if err := json.Unmarshal([]byte(tag), &i); err != nil { + return nil, fmt.Errorf("failed to unmarshal default: %v", err) } + return i, nil } func mustEnforceDefault(t *types.Type, omitEmpty bool) (interface{}, error) { - // Treat types with custom unmarshalling as a value - // (Can be alias, struct, or pointer) - if implementsCustomUnmarshalling(t) { - // Since Go JSON deserializer always feeds `null` when present - // to structs with custom UnmarshalJSON, the zero value for - // these structs is also null. - // - // In general, Kubernetes API types with custom marshalling should - // marshal their empty values to `null`. - return nil, nil - } - switch t.Kind { - case types.Alias: - return mustEnforceDefault(t.Underlying, omitEmpty) case types.Pointer, types.Map, types.Slice, types.Array, types.Interface: return nil, nil case types.Struct: - if len(t.Members) == 1 && t.Members[0].Embedded { - // Treat a struct with a single embedded member the same as an alias - return mustEnforceDefault(t.Members[0].Type, omitEmpty) - } - return map[string]interface{}{}, nil case types.Builtin: if !omitEmpty { @@ -777,8 +585,9 @@ func mustEnforceDefault(t *types.Type, omitEmpty bool) (interface{}, error) { } } -func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omitEmpty bool, commentOwningType *types.Type) error { - def, ref, err := defaultFromComments(comments, commentOwningType.Name.Package, t) +func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omitEmpty bool) error { + t = resolveAliasAndEmbeddedType(t) + def, err := defaultFromComments(comments) if err != nil { return err } @@ -794,8 +603,6 @@ func (g openAPITypeWriter) generateDefault(comments []string, t *types.Type, omi } if def != nil { g.Do("Default: $.$,\n", fmt.Sprintf("%#v", def)) - } else if ref != nil { - g.Do("Default: $.|raw$,\n", &types.Type{Name: *ref}) } return nil } @@ -849,15 +656,11 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) if name == "" { return nil } - overrides, err := ParseCommentTags(m.Type, m.CommentLines, markerPrefix) - if err != nil { - return err - } if err := g.validatePatchTags(m, parent); err != nil { return err } g.Do("\"$.$\": {\n", name) - if err := g.generateMemberExtensions(m, parent, overrides); err != nil { + if err := g.generateMemberExtensions(m, parent); err != nil { return err } g.Do("SchemaProps: spec.SchemaProps{\n", nil) @@ -873,13 +676,9 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type) return nil } omitEmpty := strings.Contains(reflect.StructTag(m.Tags).Get("json"), "omitempty") - if err := g.generateDefault(m.CommentLines, m.Type, omitEmpty, parent); err != nil { + if err := g.generateDefault(m.CommentLines, m.Type, omitEmpty); err != nil { return fmt.Errorf("failed to generate default in %v: %v: %v", parent, m.Name, err) } - err = g.generateValueValidations(&overrides.SchemaProps) - if err != nil { - return err - } t := resolveAliasAndPtrType(m.Type) // If we can get a openAPI type and format for this type, we consider it to be simple property typeString, format := openapi.OpenAPITypeFormat(t.String()) @@ -922,6 +721,22 @@ func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) { g.Do("Ref: ref(\"$.$\"),\n", t.Name.String()) } +func resolveAliasAndEmbeddedType(t *types.Type) *types.Type { + var prev *types.Type + for prev != t { + prev = t + if t.Kind == types.Alias { + t = t.Underlying + } + if t.Kind == types.Struct { + if len(t.Members) == 1 && 
t.Members[0].Embedded { + t = t.Members[0].Type + } + } + } + return t +} + func resolveAliasAndPtrType(t *types.Type) *types.Type { var prev *types.Type for prev != t { @@ -947,7 +762,7 @@ func (g openAPITypeWriter) generateMapProperty(t *types.Type) error { g.Do("Type: []string{\"object\"},\n", nil) g.Do("AdditionalProperties: &spec.SchemaOrBool{\nAllows: true,\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) - if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false, t.Elem); err != nil { + if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false); err != nil { return err } typeString, format := openapi.OpenAPITypeFormat(elemType.String()) @@ -980,7 +795,7 @@ func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error { elemType := resolveAliasAndPtrType(t.Elem) g.Do("Type: []string{\"array\"},\n", nil) g.Do("Items: &spec.SchemaOrArray{\nSchema: &spec.Schema{\nSchemaProps: spec.SchemaProps{\n", nil) - if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false, t.Elem); err != nil { + if err := g.generateDefault(t.Elem.CommentLines, t.Elem, false); err != nil { return err } typeString, format := openapi.OpenAPITypeFormat(elemType.String()) diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go index 0abe0aa0729..474d79e89dc 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go @@ -33,8 +33,7 @@ func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) { continue } - // All slice fields must have a list-type tag except []byte - if m.Type.Kind == types.Slice && m.Type.Elem != types.Byte && !hasListType { + if m.Type.Kind == types.Slice && !hasListType { fields = append(fields, m.Name) continue } diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go index 53e870c1a63..581722257f0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go @@ -135,7 +135,7 @@ func namesMatch(goName, jsonName string) bool { if !isAllowedName(goName) || !isAllowedName(jsonName) { return false } - if !strings.EqualFold(goName, jsonName) { + if strings.ToLower(goName) != strings.ToLower(jsonName) { return false } // Go field names must be CamelCase. JSON field names must be camelCase. diff --git a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go index 5fc62977345..0eb3f2360d5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler/handler.go @@ -30,7 +30,6 @@ import ( openapi_v2 "github.com/google/gnostic-models/openapiv2" "github.com/google/uuid" "github.com/munnerz/goautoneg" - klog "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/builder" "k8s.io/kube-openapi/pkg/cached" @@ -60,52 +59,52 @@ type timedSpec struct { // OpenAPIService is the service responsible for serving OpenAPI spec. It has // the ability to safely change the spec while serving it. type OpenAPIService struct { - specCache cached.LastSuccess[*spec.Swagger] - jsonCache cached.Value[timedSpec] - protoCache cached.Value[timedSpec] + specCache cached.Replaceable[*spec.Swagger] + jsonCache cached.Data[timedSpec] + protoCache cached.Data[timedSpec] } // NewOpenAPIService builds an OpenAPIService starting with the given spec. 
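The generator hunks above drop the marker-based validations and fall back to a plain "+default=" tag whose value is parsed as JSON, with symbol references no longer resolved. As an illustrative sketch only (not part of the vendored code), the stand-in helper below shows that fallback behaviour using nothing but the standard library; defaultFromComment and its inputs are hypothetical names for this example.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// defaultFromComment is a simplified stand-in for the vendored helper:
// it looks for a "+default=" marker and unmarshals its value as JSON.
func defaultFromComment(comments []string) (interface{}, error) {
	for _, c := range comments {
		c = strings.TrimSpace(c)
		if !strings.HasPrefix(c, "+default=") {
			continue
		}
		var out interface{}
		if err := json.Unmarshal([]byte(strings.TrimPrefix(c, "+default=")), &out); err != nil {
			return nil, fmt.Errorf("failed to unmarshal default: %v", err)
		}
		return out, nil
	}
	return nil, nil
}

func main() {
	def, err := defaultFromComment([]string{"Replicas is the desired replica count.", "+default=1"})
	fmt.Println(def, err) // prints: 1 <nil>
}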
func NewOpenAPIService(swagger *spec.Swagger) *OpenAPIService { - return NewOpenAPIServiceLazy(cached.Static(swagger, uuid.New().String())) + return NewOpenAPIServiceLazy(cached.NewResultOK(swagger, uuid.New().String())) } // NewOpenAPIServiceLazy builds an OpenAPIService from lazy spec. -func NewOpenAPIServiceLazy(swagger cached.Value[*spec.Swagger]) *OpenAPIService { +func NewOpenAPIServiceLazy(swagger cached.Data[*spec.Swagger]) *OpenAPIService { o := &OpenAPIService{} o.UpdateSpecLazy(swagger) - o.jsonCache = cached.Transform[*spec.Swagger](func(spec *spec.Swagger, etag string, err error) (timedSpec, string, error) { - if err != nil { - return timedSpec{}, "", err + o.jsonCache = cached.NewTransformer[*spec.Swagger](func(result cached.Result[*spec.Swagger]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) } - json, err := spec.MarshalJSON() + json, err := result.Data.MarshalJSON() if err != nil { - return timedSpec{}, "", err + return cached.NewResultErr[timedSpec](err) } - return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil + return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) }, &o.specCache) - o.protoCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { - if err != nil { - return timedSpec{}, "", err + o.protoCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) } - proto, err := ToProtoBinary(ts.spec) + proto, err := ToProtoBinary(result.Data.spec) if err != nil { - return timedSpec{}, "", err + return cached.NewResultErr[timedSpec](err) } // We can re-use the same etag as json because of the Vary header. 
- return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil + return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) }, o.jsonCache) return o } func (o *OpenAPIService) UpdateSpec(swagger *spec.Swagger) error { - o.UpdateSpecLazy(cached.Static(swagger, uuid.New().String())) + o.UpdateSpecLazy(cached.NewResultOK(swagger, uuid.New().String())) return nil } -func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Value[*spec.Swagger]) { - o.specCache.Store(swagger) +func (o *OpenAPIService) UpdateSpecLazy(swagger cached.Data[*spec.Swagger]) { + o.specCache.Replace(swagger) } func ToProtoBinary(json []byte) ([]byte, error) { @@ -131,7 +130,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl Type string SubType string ReturnedContentType string - GetDataAndEtag cached.Value[timedSpec] + GetDataAndEtag cached.Data[timedSpec] }{ {"application", subTypeJSON, "application/" + subTypeJSON, o.jsonCache}, {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf, o.protoCache}, @@ -155,11 +154,11 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl continue } // serve the first matching media type in the sorted clause list - ts, etag, err := accepts.GetDataAndEtag.Get() - if err != nil { - klog.Errorf("Error in OpenAPI handler: %s", err) + result := accepts.GetDataAndEtag.Get() + if result.Err != nil { + klog.Errorf("Error in OpenAPI handler: %s", result.Err) // only return a 503 if we have no older cache data to serve - if ts.spec == nil { + if result.Data.spec == nil { w.WriteHeader(http.StatusServiceUnavailable) return } @@ -168,9 +167,9 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl w.Header().Set("Content-Type", accepts.ReturnedContentType) // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag - w.Header().Set("Etag", strconv.Quote(etag)) + w.Header().Set("Etag", strconv.Quote(result.Etag)) // ServeContent will take care of caching using eTag. - http.ServeContent(w, r, servePath, ts.lastModified, bytes.NewReader(ts.spec)) + http.ServeContent(w, r, servePath, result.Data.lastModified, bytes.NewReader(result.Data.spec)) return } } diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index fc45634887b..2263e2f32b7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -33,7 +33,6 @@ import ( openapi_v3 "github.com/google/gnostic-models/openapiv3" "github.com/google/uuid" "github.com/munnerz/goautoneg" - "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" @@ -74,38 +73,38 @@ type timedSpec struct { // This type is protected by the lock on OpenAPIService. 
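The handler.go changes above re-wire the spec caches onto the Result-based API that this vendored revision of the cached package exposes (Replaceable, Data, Result, NewResultOK, NewResultErr, NewTransformer). The following is a minimal sketch of that chaining pattern, assuming only those exported names as they are used in the hunks above; the string payload and etag values are invented for illustration.

package main

import (
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	// A replaceable source, analogous to the handler's specCache.
	var source cached.Replaceable[string]
	source.Replace(cached.NewResultOK("hello", "etag-1"))

	// A derived cache, analogous to the jsonCache/protoCache transformers.
	upper := cached.NewTransformer(func(result cached.Result[string]) cached.Result[string] {
		if result.Err != nil {
			return cached.NewResultErr[string](result.Err)
		}
		return cached.NewResultOK(strings.ToUpper(result.Data), result.Etag)
	}, &source)

	r := upper.Get()
	fmt.Println(r.Data, r.Etag, r.Err) // HELLO etag-1 <nil>

	// Replacing the source with a new etag causes the derived value to be recomputed on the next Get.
	source.Replace(cached.NewResultOK("world", "etag-2"))
	fmt.Println(upper.Get().Data) // WORLD
}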
type openAPIV3Group struct { - specCache cached.LastSuccess[*spec3.OpenAPI] - pbCache cached.Value[timedSpec] - jsonCache cached.Value[timedSpec] + specCache cached.Replaceable[*spec3.OpenAPI] + pbCache cached.Data[timedSpec] + jsonCache cached.Data[timedSpec] } func newOpenAPIV3Group() *openAPIV3Group { o := &openAPIV3Group{} - o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { - if err != nil { - return timedSpec{}, "", err + o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) } - json, err := json.Marshal(spec) + json, err := json.Marshal(result.Data) if err != nil { - return timedSpec{}, "", err + return cached.NewResultErr[timedSpec](err) } - return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil + return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) }, &o.specCache) - o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { - if err != nil { - return timedSpec{}, "", err + o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { + if result.Err != nil { + return cached.NewResultErr[timedSpec](result.Err) } - proto, err := ToV3ProtoBinary(ts.spec) + proto, err := ToV3ProtoBinary(result.Data.spec) if err != nil { - return timedSpec{}, "", err + return cached.NewResultErr[timedSpec](err) } - return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil + return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) }, o.jsonCache) return o } -func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { - o.specCache.Store(openapi) +func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { + o.specCache.Replace(openapi) } // OpenAPIService is the service responsible for serving OpenAPI spec. It has @@ -115,7 +114,7 @@ type OpenAPIService struct { mutex sync.Mutex v3Schema map[string]*openAPIV3Group - discoveryCache cached.LastSuccess[timedSpec] + discoveryCache cached.Replaceable[timedSpec] } func computeETag(data []byte) string { @@ -138,20 +137,20 @@ func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} o.v3Schema = make(map[string]*openAPIV3Group) // We're not locked because we haven't shared the structure yet. 
- o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) return o } -func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { - caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { + caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) for gvName, group := range o.v3Schema { caches[gvName] = group.jsonCache } - return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { + return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} for gvName, result := range results { if result.Err != nil { - return timedSpec{}, "", result.Err + return cached.NewResultErr[timedSpec](result.Err) } discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), @@ -159,9 +158,9 @@ func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { } j, err := json.Marshal(discovery) if err != nil { - return timedSpec{}, "", err + return cached.NewResultErr[timedSpec](err) } - return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil + return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) }, caches) } @@ -172,32 +171,32 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } + result := cached.Result[timedSpec]{} switch getType { case subTypeJSON: - ts, etag, err := v.jsonCache.Get() - return ts.spec, etag, ts.lastModified, err + result = v.jsonCache.Get() case subTypeProtobuf, subTypeProtobufDeprecated: - ts, etag, err := v.pbCache.Get() - return ts.spec, etag, ts.lastModified, err + result = v.pbCache.Get() default: return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } + return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } // UpdateGroupVersionLazy adds or updates an existing group with the new cached. -func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { o.mutex.Lock() defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { o.v3Schema[group] = newOpenAPIV3Group() // Since there is a new item, we need to re-build the cache map. - o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } o.v3Schema[group].UpdateSpec(openapi) } func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { - o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) + o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) } func (o *OpenAPIService) DeleteGroupVersion(group string) { @@ -205,19 +204,19 @@ func (o *OpenAPIService) DeleteGroupVersion(group string) { defer o.mutex.Unlock() delete(o.v3Schema, group) // Rebuild the merge cache map since the items have changed. 
- o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - ts, etag, err := o.discoveryCache.Get() - if err != nil { - klog.Errorf("Error serving discovery: %s", err) + result := o.discoveryCache.Get() + if result.Err != nil { + klog.Errorf("Error serving discovery: %s", result.Err) w.WriteHeader(http.StatusInternalServerError) return } - w.Header().Set("Etag", strconv.Quote(etag)) + w.Header().Set("Etag", strconv.Quote(result.Etag)) w.Header().Set("Content-Type", "application/json") - http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) + http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index da5485f6a6f..bef60378231 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -22,4 +22,3 @@ var UseOptimizedJSONUnmarshalingV3 bool = true // Used by tests to selectively disable experimental JSON marshaler var UseOptimizedJSONMarshaling bool = true -var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go new file mode 100644 index 00000000000..e993fe23d58 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go @@ -0,0 +1,322 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapiconv + +import ( + "strings" + + klog "k8s.io/klog/v2" + builderutil "k8s.io/kube-openapi/pkg/builder3/util" + "k8s.io/kube-openapi/pkg/spec3" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +var OpenAPIV2DefPrefix = "#/definitions/" +var OpenAPIV3DefPrefix = "#/components/schemas/" + +// ConvertV2ToV3 converts an OpenAPI V2 object into V3. +// Certain references may be shared between the V2 and V3 objects in the conversion. 
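The new openapiconv package is driven through ConvertV2ToV3, whose definition follows immediately below. A short usage sketch, assuming only that exported signature together with the ordinary Swagger/Info/Paths types from pkg/validation/spec; the document contents are invented for illustration.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/openapiconv"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// A tiny v2 document; real callers would pass the spec they already serve.
	v2 := &spec.Swagger{SwaggerProps: spec.SwaggerProps{
		Swagger: "2.0",
		Info:    &spec.Info{InfoProps: spec.InfoProps{Title: "example", Version: "v1"}},
		Paths:   &spec.Paths{Paths: map[string]spec.PathItem{}},
	}}

	v3 := openapiconv.ConvertV2ToV3(v2)
	fmt.Println(v3.Version) // 3.0.0
}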
+func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { + v3Spec := &spec3.OpenAPI{ + Version: "3.0.0", + Info: v2Spec.Info, + ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), + Paths: ConvertPaths(v2Spec.Paths), + Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), + } + + return v3Spec +} + +func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { + if v2ED == nil { + return nil + } + return &spec3.ExternalDocumentation{ + ExternalDocumentationProps: spec3.ExternalDocumentationProps{ + Description: v2ED.Description, + URL: v2ED.URL, + }, + } +} + +func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { + components := &spec3.Components{} + + if v2Definitions != nil { + components.Schemas = make(map[string]*spec.Schema) + } + for s, schema := range v2Definitions { + components.Schemas[s] = ConvertSchema(&schema) + } + if v2SecurityDefinitions != nil { + components.SecuritySchemes = make(spec3.SecuritySchemes) + } + for s, securityScheme := range v2SecurityDefinitions { + components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) + } + if v2Responses != nil { + components.Responses = make(map[string]*spec3.Response) + } + for r, response := range v2Responses { + components.Responses[r] = ConvertResponse(&response, produces) + } + + return components +} + +func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { + if v2Schema == nil { + return nil + } + v3Schema := spec.Schema{ + VendorExtensible: v2Schema.VendorExtensible, + SchemaProps: v2Schema.SchemaProps, + SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, + ExtraProps: v2Schema.ExtraProps, + } + + if refString := v2Schema.Ref.String(); refString != "" { + if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { + v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) + } else { + klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) + } + } + + if v2Schema.Properties != nil { + v3Schema.Properties = make(map[string]spec.Schema) + for key, property := range v2Schema.Properties { + v3Schema.Properties[key] = *ConvertSchema(&property) + } + } + if v2Schema.Items != nil { + v3Schema.Items = &spec.SchemaOrArray{ + Schema: ConvertSchema(v2Schema.Items.Schema), + Schemas: ConvertSchemaList(v2Schema.Items.Schemas), + } + } + + if v2Schema.AdditionalProperties != nil { + v3Schema.AdditionalProperties = &spec.SchemaOrBool{ + Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), + Allows: v2Schema.AdditionalProperties.Allows, + } + } + if v2Schema.AdditionalItems != nil { + v3Schema.AdditionalItems = &spec.SchemaOrBool{ + Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), + Allows: v2Schema.AdditionalItems.Allows, + } + } + + return builderutil.WrapRefs(&v3Schema) +} + +func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { + if v2SchemaList == nil { + return nil + } + v3SchemaList := []spec.Schema{} + for _, s := range v2SchemaList { + v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) + } + return v3SchemaList +} + +func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { + if v2securityScheme == nil { + return nil + } + securityScheme := &spec3.SecurityScheme{ + VendorExtensible: v2securityScheme.VendorExtensible, + SecuritySchemeProps: 
spec3.SecuritySchemeProps{ + Description: v2securityScheme.Description, + Type: v2securityScheme.Type, + Name: v2securityScheme.Name, + In: v2securityScheme.In, + }, + } + + if v2securityScheme.Flow != "" { + securityScheme.Flows = make(map[string]*spec3.OAuthFlow) + securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ + OAuthFlowProps: spec3.OAuthFlowProps{ + AuthorizationUrl: v2securityScheme.AuthorizationURL, + TokenUrl: v2securityScheme.TokenURL, + Scopes: v2securityScheme.Scopes, + }, + } + } + return securityScheme +} + +func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { + if v2Paths == nil { + return nil + } + paths := &spec3.Paths{ + VendorExtensible: v2Paths.VendorExtensible, + } + + if v2Paths.Paths != nil { + paths.Paths = make(map[string]*spec3.Path) + } + for k, v := range v2Paths.Paths { + paths.Paths[k] = ConvertPathItem(v) + } + return paths +} + +func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { + path := &spec3.Path{ + Refable: v2pathItem.Refable, + PathProps: spec3.PathProps{ + Get: ConvertOperation(v2pathItem.Get), + Put: ConvertOperation(v2pathItem.Put), + Post: ConvertOperation(v2pathItem.Post), + Delete: ConvertOperation(v2pathItem.Delete), + Options: ConvertOperation(v2pathItem.Options), + Head: ConvertOperation(v2pathItem.Head), + Patch: ConvertOperation(v2pathItem.Patch), + }, + VendorExtensible: v2pathItem.VendorExtensible, + } + for _, param := range v2pathItem.Parameters { + path.Parameters = append(path.Parameters, ConvertParameter(param)) + } + return path +} + +func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { + if v2Operation == nil { + return nil + } + operation := &spec3.Operation{ + VendorExtensible: v2Operation.VendorExtensible, + OperationProps: spec3.OperationProps{ + Description: v2Operation.Description, + ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), + Tags: v2Operation.Tags, + Summary: v2Operation.Summary, + Deprecated: v2Operation.Deprecated, + OperationId: v2Operation.ID, + }, + } + + for _, param := range v2Operation.Parameters { + if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { + operation.OperationProps.RequestBody = &spec3.RequestBody{ + RequestBodyProps: spec3.RequestBodyProps{}, + } + if v2Operation.Consumes != nil { + operation.RequestBody.Content = make(map[string]*spec3.MediaType) + } + for _, consumer := range v2Operation.Consumes { + operation.RequestBody.Content[consumer] = &spec3.MediaType{ + MediaTypeProps: spec3.MediaTypeProps{ + Schema: ConvertSchema(param.ParamProps.Schema), + }, + } + } + } else { + operation.Parameters = append(operation.Parameters, ConvertParameter(param)) + } + } + + operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ + Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), + }, + VendorExtensible: v2Operation.Responses.VendorExtensible, + } + + if v2Operation.Responses.StatusCodeResponses != nil { + operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) + } + for k, v := range v2Operation.Responses.StatusCodeResponses { + operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) + } + return operation +} + +func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { + if v2Response == nil { + return nil + } + response := &spec3.Response{ + Refable: ConvertRefableResponse(v2Response.Refable), + VendorExtensible: v2Response.VendorExtensible, + ResponseProps: spec3.ResponseProps{ + 
Description: v2Response.Description, + }, + } + + if v2Response.Schema != nil { + if produces != nil { + response.Content = make(map[string]*spec3.MediaType) + } + for _, producer := range produces { + response.ResponseProps.Content[producer] = &spec3.MediaType{ + MediaTypeProps: spec3.MediaTypeProps{ + Schema: ConvertSchema(v2Response.Schema), + }, + } + } + } + return response +} + +func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { + param := &spec3.Parameter{ + Refable: ConvertRefableParameter(v2Param.Refable), + VendorExtensible: v2Param.VendorExtensible, + ParameterProps: spec3.ParameterProps{ + Name: v2Param.Name, + Description: v2Param.Description, + In: v2Param.In, + Required: v2Param.Required, + Schema: ConvertSchema(v2Param.Schema), + AllowEmptyValue: v2Param.AllowEmptyValue, + }, + } + // Convert SimpleSchema into Schema + if param.Schema == nil { + param.Schema = &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{v2Param.Type}, + Format: v2Param.Format, + UniqueItems: v2Param.UniqueItems, + }, + } + } + + return param +} + +func ConvertRefableParameter(refable spec.Refable) spec.Refable { + if refable.Ref.String() != "" { + return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} + } + return refable +} + +func ConvertRefableResponse(refable spec.Refable) spec.Refable { + if refable.Ref.String() != "" { + return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} + } + return refable +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 9887d185b26..799d866d51f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -214,6 +214,9 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { } } + if union.Discriminator != nil && len(union.Fields) == 0 { + return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator) + } return union, nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 1f62c6e772e..699291f1d8e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -32,9 +32,6 @@ type Encoding struct { // MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON func (e *Encoding) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(e) - } b1, err := json.Marshal(e.EncodingProps) if err != nil { return nil, err @@ -46,16 +43,6 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - EncodingProps encodingPropsOmitZero `json:",inline"` - spec.Extensions - } - x.Extensions = internal.SanitizeExtensions(e.Extensions) - x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) - return opts.MarshalNext(enc, x) -} - func (e *Encoding) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) @@ -95,11 +82,3 @@ type EncodingProps struct { // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } - 
-type encodingPropsOmitZero struct { - ContentType string `json:"contentType,omitempty"` - Headers map[string]*Header `json:"headers,omitempty"` - Style string `json:"style,omitempty"` - Explode bool `json:"explode,omitzero"` - AllowReserved bool `json:"allowReserved,omitzero"` -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 8834a92e6da..03b8727170f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -36,9 +36,6 @@ type Example struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (e *Example) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(e) - } b1, err := json.Marshal(e.Refable) if err != nil { return nil, err @@ -53,17 +50,6 @@ func (e *Example) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b1, b2, b3), nil } -func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - ExampleProps `json:",inline"` - spec.Extensions - } - x.Ref = e.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(e.Extensions) - x.ExampleProps = e.ExampleProps - return opts.MarshalNext(enc, x) -} func (e *Example) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index f0515496e4d..e79956721ac 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -39,9 +39,6 @@ type ExternalDocumentationProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(e) - } b1, err := json.Marshal(e.ExternalDocumentationProps) if err != nil { return nil, err @@ -53,16 +50,6 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - ExternalDocumentationProps `json:",inline"` - spec.Extensions - } - x.Extensions = internal.SanitizeExtensions(e.Extensions) - x.ExternalDocumentationProps = e.ExternalDocumentationProps - return opts.MarshalNext(enc, x) -} - func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index 08b6246cebb..bc19dd48ed6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -35,18 +35,6 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ func(o *OpenAPI, c fuzz.Continue) { c.FuzzNoCustom(o) o.Version = "3.0.0" - for i, val := range o.SecurityRequirement { - if val == nil { - o.SecurityRequirement[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, func(r *interface{}, c fuzz.Continue) { switch c.Intn(3) { @@ -181,21 +169,6 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ c.Fuzz(&v.ResponseProps) 
c.Fuzz(&v.VendorExtensible) }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) - // Do not fuzz null values into the array. - for i, val := range v.SecurityRequirement { - if val == nil { - v.SecurityRequirement[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, func(v *spec.Extensions, c fuzz.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index 9ea30628ceb..ee5a30f797a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -36,9 +36,6 @@ type Header struct { // MarshalJSON is a custom marshal function that knows how to encode Header as JSON func (h *Header) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(h) - } b1, err := json.Marshal(h.Refable) if err != nil { return nil, err @@ -54,18 +51,6 @@ func (h *Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - HeaderProps headerPropsOmitZero `json:",inline"` - spec.Extensions - } - x.Ref = h.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(h.Extensions) - x.HeaderProps = headerPropsOmitZero(h.HeaderProps) - return opts.MarshalNext(enc, x) -} - func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, h) @@ -124,19 +109,3 @@ type HeaderProps struct { // Examples of the header Examples map[string]*Example `json:"examples,omitempty"` } - -// Marshaling structure only, always edit along with corresponding -// struct (or compilation will fail). 
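With the optimized v3 marshaler removed, the spec3 types in these hunks serialize through the ConcatJSON fallback kept in each MarshalJSON: props first, then vendor extensions. A hedged sketch of what that looks like from the caller's side, assuming the Header, HeaderProps and VendorExtensible fields visible in this file; the description text and the x-example extension key are invented.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/spec3"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	h := &spec3.Header{
		HeaderProps: spec3.HeaderProps{Description: "request id"},
		VendorExtensible: spec.VendorExtensible{
			Extensions: spec.Extensions{"x-example": "abc"},
		},
	}

	// json.Marshal dispatches to the ConcatJSON-based MarshalJSON above.
	b, err := json.Marshal(h)
	fmt.Println(string(b), err) // {"description":"request id","x-example":"abc"} <nil>
}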
-type headerPropsOmitZero struct { - Description string `json:"description,omitempty"` - Required bool `json:"required,omitzero"` - Deprecated bool `json:"deprecated,omitzero"` - AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` - Style string `json:"style,omitempty"` - Explode bool `json:"explode,omitzero"` - AllowReserved bool `json:"allowReserved,omitzero"` - Schema *spec.Schema `json:"schema,omitzero"` - Content map[string]*MediaType `json:"content,omitempty"` - Example interface{} `json:"example,omitempty"` - Examples map[string]*Example `json:"examples,omitempty"` -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 47eef1edb0a..d390e69bcf7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -35,9 +35,6 @@ type MediaType struct { // MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON func (m *MediaType) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(m) - } b1, err := json.Marshal(m.MediaTypeProps) if err != nil { return nil, err @@ -49,16 +46,6 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - MediaTypeProps mediaTypePropsOmitZero `json:",inline"` - spec.Extensions - } - x.Extensions = internal.SanitizeExtensions(e.Extensions) - x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) - return opts.MarshalNext(enc, x) -} - func (m *MediaType) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, m) @@ -97,10 +84,3 @@ type MediaTypeProps struct { // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded Encoding map[string]*Encoding `json:"encoding,omitempty"` } - -type mediaTypePropsOmitZero struct { - Schema *spec.Schema `json:"schema,omitzero"` - Example interface{} `json:"example,omitempty"` - Examples map[string]*Example `json:"examples,omitempty"` - Encoding map[string]*Encoding `json:"encoding,omitempty"` -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index f1e10254795..28230610bd4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -35,9 +35,6 @@ type Operation struct { // MarshalJSON is a custom marshal function that knows how to encode Operation as JSON func (o *Operation) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(o) - } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -49,16 +46,6 @@ func (o *Operation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - spec.Extensions - OperationProps operationPropsOmitZero `json:",inline"` - } - x.Extensions = internal.SanitizeExtensions(o.Extensions) - x.OperationProps = operationPropsOmitZero(o.OperationProps) - return opts.MarshalNext(enc, x) -} - // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { @@ -108,17 +95,3 @@ type OperationProps struct { // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } - -type operationPropsOmitZero struct { - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - Description string `json:"description,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` - OperationId string `json:"operationId,omitempty"` - Parameters []*Parameter `json:"parameters,omitempty"` - RequestBody *RequestBody `json:"requestBody,omitzero"` - Responses *Responses `json:"responses,omitzero"` - Deprecated bool `json:"deprecated,omitzero"` - SecurityRequirement []map[string][]string `json:"security,omitempty"` - Servers []*Server `json:"servers,omitempty"` -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index ada7edb6375..613da71a6d3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -36,9 +36,6 @@ type Parameter struct { // MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON func (p *Parameter) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(p) - } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -54,18 +51,6 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - ParameterProps parameterPropsOmitZero `json:",inline"` - spec.Extensions - } - x.Ref = p.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(p.Extensions) - x.ParameterProps = 
parameterPropsOmitZero(p.ParameterProps) - return opts.MarshalNext(enc, x) -} - func (p *Parameter) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) @@ -129,19 +114,3 @@ type ParameterProps struct { // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding Examples map[string]*Example `json:"examples,omitempty"` } - -type parameterPropsOmitZero struct { - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Description string `json:"description,omitempty"` - Required bool `json:"required,omitzero"` - Deprecated bool `json:"deprecated,omitzero"` - AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` - Style string `json:"style,omitempty"` - Explode bool `json:"explode,omitzero"` - AllowReserved bool `json:"allowReserved,omitzero"` - Schema *spec.Schema `json:"schema,omitzero"` - Content map[string]*MediaType `json:"content,omitempty"` - Example interface{} `json:"example,omitempty"` - Examples map[string]*Example `json:"examples,omitempty"` -} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index 16fbbb4dd93..40d9061aceb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -35,41 +35,15 @@ type Paths struct { // MarshalJSON is a custom marshal function that knows how to encode Paths as JSON func (p *Paths) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(p) - } - b1, err := json.Marshal(p.VendorExtensible) + b1, err := json.Marshal(p.Paths) if err != nil { return nil, err } - - pths := make(map[string]*Path) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) + b2, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - m := make(map[string]any, len(p.Extensions)+len(p.Paths)) - for k, v := range p.Extensions { - if internal.IsExtensionKey(k) { - m[k] = v - } - } - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - m[k] = v - } - } - return opts.MarshalNext(enc, m) + return swag.ConcatJSON(b1, b2), nil } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -170,9 +144,6 @@ type Path struct { // MarshalJSON is a custom marshal function that knows how to encode Path as JSON func (p *Path) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(p) - } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -188,18 +159,6 @@ func (p *Path) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - spec.Extensions - PathProps - } - x.Ref = p.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(p.Extensions) - x.PathProps = p.PathProps - return opts.MarshalNext(enc, x) -} - func (p *Path) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 
6f8607e4009..33267ce675e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -36,9 +36,6 @@ type RequestBody struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (r *RequestBody) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(r) - } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -54,18 +51,6 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - RequestBodyProps requestBodyPropsOmitZero `json:",inline"` - spec.Extensions - } - x.Ref = r.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(r.Extensions) - x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) - return opts.MarshalNext(enc, x) -} - func (r *RequestBody) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -92,12 +77,6 @@ type RequestBodyProps struct { Required bool `json:"required,omitempty"` } -type requestBodyPropsOmitZero struct { - Description string `json:"description,omitempty"` - Content map[string]*MediaType `json:"content,omitempty"` - Required bool `json:"required,omitzero"` -} - func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { var x struct { spec.Extensions diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index 73e241fdc9c..95b388e6c60 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -37,9 +37,6 @@ type Responses struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (r *Responses) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(r) - } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -51,25 +48,6 @@ func (r *Responses) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - type ArbitraryKeys map[string]interface{} - var x struct { - ArbitraryKeys - Default *Response `json:"default,omitzero"` - } - x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) - for k, v := range r.Extensions { - if internal.IsExtensionKey(k) { - x.ArbitraryKeys[k] = v - } - } - for k, v := range r.StatusCodeResponses { - x.ArbitraryKeys[strconv.Itoa(k)] = v - } - x.Default = r.Default - return opts.MarshalNext(enc, x) -} - func (r *Responses) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -201,9 +179,6 @@ type Response struct { // MarshalJSON is a custom marshal function that knows how to encode Response as JSON func (r *Response) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(r) - } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -219,18 +194,6 @@ func (r *Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - 
var x struct { - Ref string `json:"$ref,omitempty"` - spec.Extensions - ResponseProps `json:",inline"` - } - x.Ref = r.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(r.Extensions) - x.ResponseProps = r.ResponseProps - return opts.MarshalNext(enc, x) -} - func (r *Response) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -284,9 +247,6 @@ type Link struct { // MarshalJSON is a custom marshal function that knows how to encode Link as JSON func (r *Link) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(r) - } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -302,18 +262,6 @@ func (r *Link) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - spec.Extensions - LinkProps `json:",inline"` - } - x.Ref = r.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(r.Extensions) - x.LinkProps = r.LinkProps - return opts.MarshalNext(enc, x) -} - func (r *Link) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index dd1e98ed881..edf7e6de3f6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -20,8 +20,6 @@ import ( "encoding/json" "github.com/go-openapi/swag" - "k8s.io/kube-openapi/pkg/internal" - jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -34,9 +32,6 @@ type SecurityScheme struct { // MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON func (s *SecurityScheme) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(s) - } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -52,18 +47,6 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } -func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - Ref string `json:"$ref,omitempty"` - SecuritySchemeProps `json:",inline"` - spec.Extensions - } - x.Ref = s.Refable.Ref.String() - x.Extensions = internal.SanitizeExtensions(s.Extensions) - x.SecuritySchemeProps = s.SecuritySchemeProps - return opts.MarshalNext(enc, x) -} - // UnmarshalJSON hydrates this items instance with the data from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index 654a42c06e4..d5df0a78110 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -41,9 +41,6 @@ type ServerProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *Server) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(s) - } b1, err := json.Marshal(s.ServerProps) if err != nil { return nil, err @@ -55,16 +52,6 @@ func (s *Server) 
MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - ServerProps `json:",inline"` - spec.Extensions - } - x.Extensions = internal.SanitizeExtensions(s.Extensions) - x.ServerProps = s.ServerProps - return opts.MarshalNext(enc, x) -} - func (s *Server) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) @@ -109,9 +96,6 @@ type ServerVariableProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *ServerVariable) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(s) - } b1, err := json.Marshal(s.ServerVariableProps) if err != nil { return nil, err @@ -123,16 +107,6 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } -func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - var x struct { - ServerVariableProps `json:",inline"` - spec.Extensions - } - x.Extensions = internal.SanitizeExtensions(s.Extensions) - x.ServerVariableProps = s.ServerVariableProps - return opts.MarshalNext(enc, x) -} - func (s *ServerVariable) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index 5db819c7f03..bed096fb76b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -36,8 +36,6 @@ type OpenAPI struct { Servers []*Server `json:"servers,omitempty"` // Components hold various schemas for the specification Components *Components `json:"components,omitempty"` - // SecurityRequirement holds a declaration of which security mechanisms can be used across the API - SecurityRequirement []map[string][]string `json:"security,omitempty"` // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } @@ -50,26 +48,3 @@ func (o *OpenAPI) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &p) } - -func (o *OpenAPI) MarshalJSON() ([]byte, error) { - if internal.UseOptimizedJSONMarshalingV3 { - return internal.DeterministicMarshal(o) - } - type OpenAPIWithNoFunctions OpenAPI - p := (*OpenAPIWithNoFunctions)(o) - return json.Marshal(&p) -} - -func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { - type OpenAPIOmitZero struct { - Version string `json:"openapi"` - Info *spec.Info `json:"info"` - Paths *Paths `json:"paths,omitzero"` - Servers []*Server `json:"servers,omitempty"` - Components *Components `json:"components,omitzero"` - SecurityRequirement []map[string][]string `json:"security,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` - } - x := (*OpenAPIOmitZero)(o) - return opts.MarshalNext(enc, x) -} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go new file mode 100644 index 00000000000..c66f998f519 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go @@ -0,0 +1,502 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec + +import ( + "github.com/go-openapi/jsonreference" + "github.com/google/go-cmp/cmp" + fuzz "github.com/google/gofuzz" +) + +var SwaggerFuzzFuncs []interface{} = []interface{}{ + func(v *Responses, c fuzz.Continue) { + c.FuzzNoCustom(v) + if v.Default != nil { + // Check if we hit maxDepth and left an incomplete value + if v.Default.Description == "" { + v.Default = nil + v.StatusCodeResponses = nil + } + } + + // conversion has no way to discern empty statusCodeResponses from + // nil, since "default" is always included in the map. + // So avoid empty responses list + if len(v.StatusCodeResponses) == 0 { + v.StatusCodeResponses = nil + } + }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v != nil { + // force non-nil + v.Responses = &Responses{} + c.Fuzz(v.Responses) + + v.Schemes = nil + if c.RandBool() { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. + for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + } + }, + func(v map[int]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Prevent negative numbers + num := c.Intn(4) + for i := 0; i < num+2; i++ { + val := Response{} + c.Fuzz(&val) + + val.Description = c.RandString() + "x" + v[100*(i+1)+c.Intn(100)] = val + } + }, + func(v map[string]PathItem, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + num := c.Intn(5) + for i := 0; i < num+2; i++ { + val := PathItem{} + c.Fuzz(&val) + + // Ref params are only allowed in certain locations, so + // possibly add a few to PathItems + numRefsToAdd := c.Intn(5) + for i := 0; i < numRefsToAdd; i++ { + theRef := Parameter{} + c.Fuzz(&theRef.Refable) + + val.Parameters = append(val.Parameters, theRef) + } + + v["/"+c.RandString()] = val + } + }, + func(v *SchemaOrArray, c fuzz.Continue) { + *v = SchemaOrArray{} + // gnostic parser just doesn't support more + // than one Schema here + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + }, + func(v *SchemaOrBool, c fuzz.Continue) { + *v = SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v map[string]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Response definitions are not allowed to + // be refs + for i := 0; i < c.Intn(5)+1; i++ { + resp := &Response{} + + c.Fuzz(resp) + resp.Ref = Ref{} + resp.Description = c.RandString() + "x" + + // Response refs 
are not vendor extensible by gnostic + resp.VendorExtensible.Extensions = nil + v[c.RandString()+"x"] = *resp + } + }, + func(v *Header, c fuzz.Continue) { + if v != nil { + c.FuzzNoCustom(v) + + // descendant Items of Header may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Ref, c fuzz.Continue) { + *v = Ref{} + v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) + }, + func(v *Response, c fuzz.Continue) { + *v = Response{} + if c.RandBool() { + v.Ref = Ref{} + v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) + } else { + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.Schema) + c.Fuzz(&v.ResponseProps) + + v.Headers = nil + v.Ref = Ref{} + + n := 0 + c.Fuzz(&n) + if n != 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + num := c.Intn(4) + for i := 0; i < num; i++ { + if v.Headers == nil { + v.Headers = make(map[string]Header) + } + hdr := Header{} + c.Fuzz(&hdr) + if hdr.Type == "" { + // hit maxDepth, just abort trying to make haders + v.Headers = nil + break + } + v.Headers[c.RandString()+"x"] = hdr + } + } else { + v.Headers = nil + } + } + + v.Description = c.RandString() + "x" + + // Gnostic parses empty as nil, so to keep avoid putting empty + if len(v.Headers) == 0 { + v.Headers = nil + } + }, + func(v **Info, c fuzz.Continue) { + // Info is never nil + *v = &Info{} + c.FuzzNoCustom(*v) + + (*v).Title = c.RandString() + "x" + }, + func(v *Extensions, c fuzz.Continue) { + // gnostic parser only picks up x- vendor extensions + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *Swagger, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v.Paths == nil { + // Force paths non-nil since it does not have omitempty in json tag. + // This means a perfect roundtrip (via json) is impossible, + // since we can't tell the difference between empty/unspecified paths + v.Paths = &Paths{} + c.Fuzz(v.Paths) + } + + v.Swagger = "2.0" + + // Gnostic support serializing ID at all + // unavoidable data loss + v.ID = "" + + v.Schemes = nil + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. 
+ for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + v.Description = c.RandString() + "x" + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + v.Type = "basic" + case 1: + v.Type = "apiKey" + switch c.Intn(2) { + case 0: + v.In = "header" + case 1: + v.In = "query" + default: + panic("unreachable") + } + v.Name = "x" + c.RandString() + case 2: + v.Type = "oauth2" + + switch c.Intn(4) { + case 0: + v.Flow = "accessCode" + v.TokenURL = "https://" + c.RandString() + v.AuthorizationURL = "https://" + c.RandString() + case 1: + v.Flow = "application" + v.TokenURL = "https://" + c.RandString() + case 2: + v.Flow = "implicit" + v.AuthorizationURL = "https://" + c.RandString() + case 3: + v.Flow = "password" + v.TokenURL = "https://" + c.RandString() + default: + panic("unreachable") + } + c.Fuzz(&v.Scopes) + default: + panic("unreachable") + } + }, + func(v *interface{}, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *string, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *ExternalDocumentation, c fuzz.Continue) { + v.Description = c.RandString() + "x" + v.URL = c.RandString() + "x" + }, + func(v *SimpleSchema, c fuzz.Continue) { + c.FuzzNoCustom(v) + + switch c.Intn(5) { + case 0: + v.Type = "string" + case 1: + v.Type = "number" + case 2: + v.Type = "boolean" + case 3: + v.Type = "integer" + case 4: + v.Type = "array" + default: + panic("unreachable") + } + + switch c.Intn(5) { + case 0: + v.CollectionFormat = "csv" + case 1: + v.CollectionFormat = "ssv" + case 2: + v.CollectionFormat = "tsv" + case 3: + v.CollectionFormat = "pipes" + case 4: + v.CollectionFormat = "" + default: + panic("unreachable") + } + + // None of the types which include SimpleSchema in our definitions + // actually support "example" in the official spec + v.Example = nil + + // unsupported by openapi + v.Nullable = false + }, + func(v *int64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0 { + *v = 1 + } + }, + func(v *float64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0.0 { + *v = 1.0 + } + }, + func(v *Parameter, c fuzz.Continue) { + if v == nil { + return + } + c.Fuzz(&v.VendorExtensible) + if c.RandBool() { + // body param + v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + v.In = "body" + c.Fuzz(&v.Description) + c.Fuzz(&v.Required) + + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + } else { + c.Fuzz(&v.SimpleSchema) + c.Fuzz(&v.CommonValidations) + v.AllowEmptyValue = false + v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + + switch c.Intn(4) { + case 0: + // Header param + v.In = "header" + case 1: + // Form data param + v.In = "formData" + v.AllowEmptyValue = c.RandBool() + case 2: + // Query param + v.In = "query" + v.AllowEmptyValue = c.RandBool() + case 3: + // Path param + v.In = "path" + v.Required = true + default: + panic("unreachable") + } + + // descendant Items of Parameter may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Schema, c fuzz.Continue) { + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + 
c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + // c.Fuzz(&v.ExtraProps) + // ExtraProps will not roundtrip - gnostic throws out + // unrecognized keys + } + + // Not supported by official openapi v2 spec + // and stripped by k8s apiserver + v.ID = "" + v.AnyOf = nil + v.OneOf = nil + v.Not = nil + v.Nullable = false + v.AdditionalItems = nil + v.Schema = "" + v.PatternProperties = nil + v.Definitions = nil + v.Dependencies = nil + }, +} + +var SwaggerDiffOptions = []cmp.Option{ + // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields + cmp.Comparer(func(a Ref, b Ref) bool { + return a.String() == b.String() + }), +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1f4de11e385..807ec84f06f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -698,21 +698,17 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/cel-go v0.18.2 => github.com/google/cel-go v0.17.7 +# github.com/google/cel-go v0.18.2 => github.com/google/cel-go v0.16.1 ## explicit; go 1.18 github.com/google/cel-go/cel github.com/google/cel-go/checker github.com/google/cel-go/checker/decls github.com/google/cel-go/common -github.com/google/cel-go/common/ast github.com/google/cel-go/common/containers github.com/google/cel-go/common/debug -github.com/google/cel-go/common/decls -github.com/google/cel-go/common/functions github.com/google/cel-go/common/operators github.com/google/cel-go/common/overloads github.com/google/cel-go/common/runes -github.com/google/cel-go/common/stdlib github.com/google/cel-go/common/types github.com/google/cel-go/common/types/pb github.com/google/cel-go/common/types/ref @@ -806,9 +802,6 @@ github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/pagination -# github.com/gorilla/websocket v1.5.1 -## explicit; go 1.20 -github.com/gorilla/websocket # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware @@ -1074,9 +1067,6 @@ github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f -## explicit -github.com/mxk/go-flowrate/flowrate # github.com/newrelic/newrelic-client-go v1.1.0 ## explicit; go 1.18 github.com/newrelic/newrelic-client-go/internal/http @@ -1904,8 +1894,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.4 => k8s.io/api v0.29.4 -## explicit; go 1.21 +# k8s.io/api v0.29.2 => k8s.io/api v0.28.9 +## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -1938,7 +1928,7 @@ k8s.io/api/discovery/v1beta1 k8s.io/api/events/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 -k8s.io/api/flowcontrol/v1 +k8s.io/api/flowcontrol/v1alpha1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 @@ -1971,8 +1961,8 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme 
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.4 => k8s.io/apimachinery v0.29.4 -## explicit; go 1.21 +# k8s.io/apimachinery v0.29.2 => k8s.io/apimachinery v0.28.9 +## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -2016,7 +2006,6 @@ k8s.io/apimachinery/pkg/util/managedfields/internal k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net -k8s.io/apimachinery/pkg/util/proxy k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/remotecommand k8s.io/apimachinery/pkg/util/runtime @@ -2034,8 +2023,8 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.4 => k8s.io/apiserver v0.29.4 -## explicit; go 1.21 +# k8s.io/apiserver v0.29.2 => k8s.io/apiserver v0.28.9 +## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel k8s.io/apiserver/pkg/admission/configuration @@ -2065,7 +2054,6 @@ k8s.io/apiserver/pkg/apis/apiserver/install k8s.io/apiserver/pkg/apis/apiserver/v1 k8s.io/apiserver/pkg/apis/apiserver/v1alpha1 k8s.io/apiserver/pkg/apis/apiserver/v1beta1 -k8s.io/apiserver/pkg/apis/apiserver/validation k8s.io/apiserver/pkg/apis/audit k8s.io/apiserver/pkg/apis/audit/install k8s.io/apiserver/pkg/apis/audit/v1 @@ -2079,7 +2067,6 @@ k8s.io/apiserver/pkg/audit k8s.io/apiserver/pkg/audit/policy k8s.io/apiserver/pkg/authentication/authenticator k8s.io/apiserver/pkg/authentication/authenticatorfactory -k8s.io/apiserver/pkg/authentication/cel k8s.io/apiserver/pkg/authentication/group k8s.io/apiserver/pkg/authentication/request/anonymous k8s.io/apiserver/pkg/authentication/request/bearertoken @@ -2093,7 +2080,6 @@ k8s.io/apiserver/pkg/authentication/token/tokenfile k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/authorization/authorizer k8s.io/apiserver/pkg/authorization/authorizerfactory -k8s.io/apiserver/pkg/authorization/cel k8s.io/apiserver/pkg/authorization/path k8s.io/apiserver/pkg/authorization/union k8s.io/apiserver/pkg/cel @@ -2182,8 +2168,8 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.29.4 -## explicit; go 1.21 +# k8s.io/client-go v1.5.2 => k8s.io/client-go v0.28.9 +## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -2208,7 +2194,7 @@ k8s.io/client-go/applyconfigurations/discovery/v1beta1 k8s.io/client-go/applyconfigurations/events/v1 k8s.io/client-go/applyconfigurations/events/v1beta1 k8s.io/client-go/applyconfigurations/extensions/v1beta1 -k8s.io/client-go/applyconfigurations/flowcontrol/v1 +k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2 k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3 @@ -2274,7 +2260,7 @@ k8s.io/client-go/informers/events/v1beta1 k8s.io/client-go/informers/extensions 
k8s.io/client-go/informers/extensions/v1beta1 k8s.io/client-go/informers/flowcontrol -k8s.io/client-go/informers/flowcontrol/v1 +k8s.io/client-go/informers/flowcontrol/v1alpha1 k8s.io/client-go/informers/flowcontrol/v1beta1 k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/informers/flowcontrol/v1beta3 @@ -2335,7 +2321,7 @@ k8s.io/client-go/kubernetes/typed/discovery/v1beta1 k8s.io/client-go/kubernetes/typed/events/v1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 -k8s.io/client-go/kubernetes/typed/flowcontrol/v1 +k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 @@ -2381,7 +2367,7 @@ k8s.io/client-go/listers/discovery/v1beta1 k8s.io/client-go/listers/events/v1 k8s.io/client-go/listers/events/v1beta1 k8s.io/client-go/listers/extensions/v1beta1 -k8s.io/client-go/listers/flowcontrol/v1 +k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/flowcontrol/v1beta2 k8s.io/client-go/listers/flowcontrol/v1beta3 @@ -2435,7 +2421,6 @@ k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/events -k8s.io/client-go/tools/internal/events k8s.io/client-go/tools/leaderelection k8s.io/client-go/tools/leaderelection/resourcelock k8s.io/client-go/tools/metrics @@ -2446,7 +2431,6 @@ k8s.io/client-go/tools/reference k8s.io/client-go/tools/remotecommand k8s.io/client-go/transport k8s.io/client-go/transport/spdy -k8s.io/client-go/transport/websocket k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/exec @@ -2455,8 +2439,8 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.29.4 => k8s.io/code-generator v0.29.4 -## explicit; go 1.21 +# k8s.io/code-generator v0.29.2 => k8s.io/code-generator v0.28.9 +## explicit; go 1.20 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen k8s.io/code-generator/cmd/applyconfiguration-gen/args @@ -2493,8 +2477,8 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.29.4 => k8s.io/component-base v0.29.4 -## explicit; go 1.21 +# k8s.io/component-base v0.29.2 => k8s.io/component-base v0.28.9 +## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 @@ -2536,14 +2520,14 @@ k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler k8s.io/klog/v2/klogr -# k8s.io/kms v0.29.4 +# k8s.io/kms v0.29.2 ## explicit; go 1.21 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 k8s.io/kms/pkg/service k8s.io/kms/pkg/util -# k8s.io/kube-openapi v0.0.0-20240126223410-2919ad4fcfec -## explicit; go 1.21 +# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 => k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/builder @@ -2558,6 +2542,7 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json 
+k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 @@ -2568,8 +2553,8 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/metrics v0.29.4 => k8s.io/metrics v0.29.4 -## explicit; go 1.21 +# k8s.io/metrics v0.28.9 => k8s.io/metrics v0.28.9 +## explicit; go 1.20 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/install k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 @@ -2686,7 +2671,7 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml sigs.k8s.io/controller-tools/pkg/version sigs.k8s.io/controller-tools/pkg/webhook -# sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240424182356-4f8ac354df3b +# sigs.k8s.io/custom-metrics-apiserver v1.28.1-0.20240103150633-c0d09c9b6dd1 ## explicit; go 1.21 sigs.k8s.io/custom-metrics-apiserver/pkg/apiserver sigs.k8s.io/custom-metrics-apiserver/pkg/apiserver/endpoints/handlers @@ -2836,17 +2821,18 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/google/cel-go => github.com/google/cel-go v0.17.7 +# github.com/google/cel-go => github.com/google/cel-go v0.16.1 # github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.16.0 # github.com/prometheus/client_model => github.com/prometheus/client_model v0.4.0 # github.com/prometheus/common => github.com/prometheus/common v0.44.0 -# k8s.io/api => k8s.io/api v0.29.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.29.4 -# k8s.io/apiserver => k8s.io/apiserver v0.29.4 -# k8s.io/client-go => k8s.io/client-go v0.29.4 -# k8s.io/code-generator => k8s.io/code-generator v0.29.4 -# k8s.io/component-base => k8s.io/component-base v0.29.4 -# k8s.io/metrics => k8s.io/metrics v0.29.4 +# k8s.io/api => k8s.io/api v0.28.9 +# k8s.io/apimachinery => k8s.io/apimachinery v0.28.9 +# k8s.io/apiserver => k8s.io/apiserver v0.28.9 +# k8s.io/client-go => k8s.io/client-go v0.28.9 +# k8s.io/code-generator => k8s.io/code-generator v0.28.9 +# k8s.io/component-base => k8s.io/component-base v0.28.9 +# k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +# k8s.io/metrics => k8s.io/metrics v0.28.9 # github.com/chzyer/logex => github.com/chzyer/logex v1.2.1 # github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.0 # github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.0 diff --git a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/builder.go b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/builder.go index 1571b148031..7b4fb40a46e 100644 --- a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/builder.go +++ b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/builder.go @@ -26,7 +26,9 @@ import ( apimeta "k8s.io/apimachinery/pkg/api/meta" utilerrors "k8s.io/apimachinery/pkg/util/errors" openapinamer "k8s.io/apiserver/pkg/endpoints/openapi" + "k8s.io/apiserver/pkg/features" genericapiserver "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/informers" @@ -81,9 +83,6 @@ type AdapterBase struct { // OpenAPIConfig OpenAPIConfig *openapicommon.Config - // OpenAPIV3Config - OpenAPIV3Config *openapicommon.OpenAPIV3Config - // flagOnce controls initialization of the flags. 
flagOnce sync.Once @@ -246,7 +245,7 @@ func mergeOpenAPIDefinitions(definitionsGetters []openapicommon.GetOpenAPIDefini } } -func (b *AdapterBase) getAPIDefinitions() openapicommon.GetOpenAPIDefinitions { +func (b *AdapterBase) openAPIConfig(createConfig func(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *openapinamer.DefinitionNamer) *openapicommon.Config) *openapicommon.Config { definitionsGetters := []openapicommon.GetOpenAPIDefinitions{generatedcore.GetOpenAPIDefinitions} if b.cmProvider != nil { definitionsGetters = append(definitionsGetters, generatedcustommetrics.GetOpenAPIDefinitions) @@ -254,21 +253,19 @@ func (b *AdapterBase) getAPIDefinitions() openapicommon.GetOpenAPIDefinitions { if b.emProvider != nil { definitionsGetters = append(definitionsGetters, generatedexternalmetrics.GetOpenAPIDefinitions) } - return mergeOpenAPIDefinitions(definitionsGetters) -} - -func (b *AdapterBase) defaultOpenAPIConfig() *openapicommon.Config { - openAPIConfig := genericapiserver.DefaultOpenAPIConfig(b.getAPIDefinitions(), openapinamer.NewDefinitionNamer(apiserver.Scheme)) + getAPIDefinitions := mergeOpenAPIDefinitions(definitionsGetters) + openAPIConfig := createConfig(getAPIDefinitions, openapinamer.NewDefinitionNamer(apiserver.Scheme)) openAPIConfig.Info.Title = b.Name openAPIConfig.Info.Version = "1.0.0" return openAPIConfig } -func (b *AdapterBase) defaultOpenAPIV3Config() *openapicommon.OpenAPIV3Config { - openAPIConfig := genericapiserver.DefaultOpenAPIV3Config(b.getAPIDefinitions(), openapinamer.NewDefinitionNamer(apiserver.Scheme)) - openAPIConfig.Info.Title = b.Name - openAPIConfig.Info.Version = "1.0.0" - return openAPIConfig +func (b *AdapterBase) defaultOpenAPIConfig() *openapicommon.Config { + return b.openAPIConfig(genericapiserver.DefaultOpenAPIConfig) +} + +func (b *AdapterBase) defaultOpenAPIV3Config() *openapicommon.Config { + return b.openAPIConfig(genericapiserver.DefaultOpenAPIV3Config) } // Config fetches the configuration used to ultimately create the custom metrics adapter's @@ -287,31 +284,21 @@ func (b *AdapterBase) Config() (*apiserver.Config, error) { b.OpenAPIConfig = b.defaultOpenAPIConfig() } b.CustomMetricsAdapterServerOptions.OpenAPIConfig = b.OpenAPIConfig - - if b.OpenAPIV3Config == nil { + if b.OpenAPIV3Config == nil && utilfeature.DefaultFeatureGate.Enabled(features.OpenAPIV3) { b.OpenAPIV3Config = b.defaultOpenAPIV3Config() } - b.CustomMetricsAdapterServerOptions.OpenAPIV3Config = b.OpenAPIV3Config if errList := b.CustomMetricsAdapterServerOptions.Validate(); len(errList) > 0 { return nil, utilerrors.NewAggregate(errList) } - // let's initialize informers if they're not already - _, err := b.Informers() - if err != nil { - return nil, err - } - - serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs) - serverConfig.ClientConfig = b.clientConfig - serverConfig.SharedInformerFactory = b.informers - err = b.CustomMetricsAdapterServerOptions.ApplyTo(serverConfig) + serverConfig := genericapiserver.NewConfig(apiserver.Codecs) + err := b.CustomMetricsAdapterServerOptions.ApplyTo(serverConfig) if err != nil { return nil, err } b.config = &apiserver.Config{ - GenericConfig: &serverConfig.Config, + GenericConfig: serverConfig, } } diff --git a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options/options.go b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options/options.go index 12ff5a4e6fb..16f943e9726 100644 --- a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options/options.go +++ 
b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/cmd/options/options.go @@ -25,7 +25,6 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" genericoptions "k8s.io/apiserver/pkg/server/options" - "k8s.io/client-go/kubernetes" openapicommon "k8s.io/kube-openapi/pkg/common" ) @@ -41,7 +40,7 @@ type CustomMetricsAdapterServerOptions struct { Features *genericoptions.FeatureOptions OpenAPIConfig *openapicommon.Config - OpenAPIV3Config *openapicommon.OpenAPIV3Config + OpenAPIV3Config *openapicommon.Config EnableMetrics bool } @@ -82,7 +81,7 @@ func (o *CustomMetricsAdapterServerOptions) AddFlags(fs *pflag.FlagSet) { } // ApplyTo applies CustomMetricsAdapterServerOptions to the server configuration. -func (o *CustomMetricsAdapterServerOptions) ApplyTo(serverConfig *genericapiserver.RecommendedConfig) error { +func (o *CustomMetricsAdapterServerOptions) ApplyTo(serverConfig *genericapiserver.Config) error { // TODO have a "real" external address (have an AdvertiseAddress?) if err := o.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil { return fmt.Errorf("error creating self-signed certificates: %v", err) @@ -97,16 +96,10 @@ func (o *CustomMetricsAdapterServerOptions) ApplyTo(serverConfig *genericapiserv if err := o.Authorization.ApplyTo(&serverConfig.Authorization); err != nil { return err } - if err := o.Audit.ApplyTo(&serverConfig.Config); err != nil { + if err := o.Audit.ApplyTo(serverConfig); err != nil { return err } - - clientset, err := kubernetes.NewForConfig(serverConfig.ClientConfig) - if err != nil { - return err - } - - if err := o.Features.ApplyTo(&serverConfig.Config, clientset, serverConfig.SharedInformerFactory); err != nil { + if err := o.Features.ApplyTo(serverConfig); err != nil { return err } diff --git a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/core/zz_generated.openapi.go b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/core/zz_generated.openapi.go index 15c4dceb1fa..408d36f3d40 100644 --- a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/core/zz_generated.openapi.go +++ b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/core/zz_generated.openapi.go @@ -49,7 +49,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref), "k8s.io/api/core/v1.ClaimSource": schema_k8sio_api_core_v1_ClaimSource(ref), "k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref), - "k8s.io/api/core/v1.ClusterTrustBundleProjection": schema_k8sio_api_core_v1_ClusterTrustBundleProjection(ref), "k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref), "k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref), "k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref), @@ -118,7 +117,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref), "k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref), "k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref), - "k8s.io/api/core/v1.ModifyVolumeStatus": schema_k8sio_api_core_v1_ModifyVolumeStatus(ref), "k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref), 
"k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref), "k8s.io/api/core/v1.NamespaceCondition": schema_k8sio_api_core_v1_NamespaceCondition(ref), @@ -231,7 +229,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), - "k8s.io/api/core/v1.SleepAction": schema_k8sio_api_core_v1_SleepAction(ref), "k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), @@ -248,7 +245,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref), "k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref), "k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref), - "k8s.io/api/core/v1.VolumeResourceRequirements": schema_k8sio_api_core_v1_VolumeResourceRequirements(ref), "k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref), "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref), "k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref), @@ -706,7 +702,7 @@ func schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref common.ReferenceCall }, "nodeExpandSecretRef": { SchemaProps: spec.SchemaProps{ - Description: "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + Description: "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This is a beta field which is enabled default by CSINodeExpandSecret feature gate. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.", Ref: ref("k8s.io/api/core/v1.SecretReference"), }, }, @@ -1087,57 +1083,6 @@ func schema_k8sio_api_core_v1_ClientIPConfig(ref common.ReferenceCallback) commo } } -func schema_k8sio_api_core_v1_ClusterTrustBundleProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", - Type: []string{"string"}, - Format: "", - }, - }, - "signerName": { - SchemaProps: spec.SchemaProps{ - Description: "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. 
The contents of all selected ClusterTrustBundles will be unified and deduplicated.", - Type: []string{"string"}, - Format: "", - }, - }, - "labelSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Relative path from the volume root to write the bundle.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - func schema_k8sio_api_core_v1_ComponentCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2061,6 +2006,7 @@ func schema_k8sio_api_core_v1_ContainerStateRunning(ref common.ReferenceCallback "startedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which the container was last (re-)started", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2111,12 +2057,14 @@ func schema_k8sio_api_core_v1_ContainerStateTerminated(ref common.ReferenceCallb "startedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which previous execution of the container started", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "finishedAt": { SchemaProps: spec.SchemaProps{ Description: "Time at which the container last terminated", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -2246,7 +2194,8 @@ func schema_k8sio_api_core_v1_ContainerStatus(ref common.ReferenceCallback) comm Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -2506,7 +2455,7 @@ func schema_k8sio_api_core_v1_EndpointPort(ref common.ReferenceCallback) common. }, "appProtocol": { SchemaProps: spec.SchemaProps{ - Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", Type: []string{"string"}, Format: "", }, @@ -3426,12 +3375,14 @@ func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPI "firstTimestamp": { SchemaProps: spec.SchemaProps{ Description: "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTimestamp": { SchemaProps: spec.SchemaProps{ Description: "The time at which the most recent occurrence of this event was recorded.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -3452,6 +3403,7 @@ func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPI "eventTime": { SchemaProps: spec.SchemaProps{ Description: "Time when this Event was first observed.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -3567,6 +3519,7 @@ func schema_k8sio_api_core_v1_EventSeries(ref common.ReferenceCallback) common.O "lastObservedTime": { SchemaProps: spec.SchemaProps{ Description: "Time of the last occurrence observed", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), }, }, @@ -4050,6 +4003,7 @@ func schema_k8sio_api_core_v1_HTTPGetAction(ref common.ReferenceCallback) common "port": { SchemaProps: spec.SchemaProps{ Description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), }, }, @@ -4501,17 +4455,11 @@ func schema_k8sio_api_core_v1_LifecycleHandler(ref common.ReferenceCallback) com Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), }, }, - "sleep": { - SchemaProps: spec.SchemaProps{ - Description: "Sleep represents the duration that the container should sleep before being terminated.", - Ref: ref("k8s.io/api/core/v1.SleepAction"), - }, - }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.SleepAction", "k8s.io/api/core/v1.TCPSocketAction"}, + "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, } } @@ -4581,7 +4529,8 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -4595,7 +4544,8 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -4609,7 +4559,8 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -4623,7 +4574,8 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -4637,7 +4589,8 @@ func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) commo Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -4768,7 +4721,8 @@ func schema_k8sio_api_core_v1_List(ref common.ReferenceCallback) common.OpenAPID Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), }, }, }, @@ -4804,13 +4758,6 @@ func schema_k8sio_api_core_v1_LoadBalancerIngress(ref common.ReferenceCallback) Format: "", }, }, - "ipMode": { - SchemaProps: spec.SchemaProps{ - Description: "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. 
Service implementations may use this information to adjust traffic routing.", - Type: []string{"string"}, - Format: "", - }, - }, "ports": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -4921,36 +4868,6 @@ func schema_k8sio_api_core_v1_LocalVolumeSource(ref common.ReferenceCallback) co } } -func schema_k8sio_api_core_v1_ModifyVolumeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "targetVolumeAttributesClassName": { - SchemaProps: spec.SchemaProps{ - Description: "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.\n\nPossible enum values:\n - `\"InProgress\"` InProgress indicates that the volume is being modified\n - `\"Infeasible\"` Infeasible indicates that the request has been rejected as invalid by the CSI driver. 
To resolve the error, a valid VolumeAttributesClass needs to be specified\n - `\"Pending\"` Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as the specified VolumeAttributesClass not existing", - Default: "", - Type: []string{"string"}, - Format: "", - Enum: []interface{}{"InProgress", "Infeasible", "Pending"}, - }, - }, - }, - Required: []string{"status"}, - }, - }, - } -} - func schema_k8sio_api_core_v1_NFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -5063,7 +4980,8 @@ func schema_k8sio_api_core_v1_NamespaceCondition(ref common.ReferenceCallback) c }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "reason": { @@ -5350,12 +5268,14 @@ func schema_k8sio_api_core_v1_NodeCondition(ref common.ReferenceCallback) common "lastHeartbeatTime": { SchemaProps: spec.SchemaProps{ Description: "Last time we got an update on a given condition.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "Last time the condition transit from one status to another.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -5565,7 +5485,8 @@ func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -5807,7 +5728,8 @@ func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.Op Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -5821,7 +5743,8 @@ func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.Op Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -6265,12 +6188,14 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref common.Referenc "lastProbeTime": { SchemaProps: spec.SchemaProps{ Description: "lastProbeTime is the time we probed the condition.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "lastTransitionTime": { SchemaProps: spec.SchemaProps{ Description: "lastTransitionTime is the time the condition transitioned from one status to another.", + Default: map[string]interface{}{}, Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, @@ -6380,7 +6305,7 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref common.ReferenceCall SchemaProps: spec.SchemaProps{ Description: "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeResourceRequirements"), + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, "volumeName": { @@ -6417,18 +6342,11 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref common.ReferenceCall Ref: ref("k8s.io/api/core/v1.TypedObjectReference"), }, }, - "volumeAttributesClassName": { - SchemaProps: spec.SchemaProps{ - Description: "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", - Type: []string{"string"}, - Format: "", - }, - }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/api/core/v1.TypedObjectReference", "k8s.io/api/core/v1.VolumeResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/api/core/v1.TypedObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } @@ -6470,7 +6388,8 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -6504,7 +6423,8 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), }, }, }, @@ -6531,24 +6451,11 @@ func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCa }, }, }, - "currentVolumeAttributesClassName": { - SchemaProps: spec.SchemaProps{ - Description: "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is an alpha field and requires enabling VolumeAttributesClass feature.", - Type: []string{"string"}, - Format: "", - }, - }, - "modifyVolumeStatus": { - SchemaProps: spec.SchemaProps{ - Description: "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. 
This is an alpha field and requires enabling VolumeAttributesClass feature.",
-							Ref: ref("k8s.io/api/core/v1.ModifyVolumeStatus"),
-						},
-					},
 				},
 			},
 		},
 		Dependencies: []string{
-			"k8s.io/api/core/v1.ModifyVolumeStatus", "k8s.io/api/core/v1.PersistentVolumeClaimCondition", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
+			"k8s.io/api/core/v1.PersistentVolumeClaimCondition", "k8s.io/apimachinery/pkg/api/resource.Quantity"},
 	}
 }
@@ -6824,7 +6731,8 @@ func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback)
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -7032,13 +6940,6 @@ func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback)
 							Ref: ref("k8s.io/api/core/v1.VolumeNodeAffinity"),
 						},
 					},
-					"volumeAttributesClassName": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.",
-							Type: []string{"string"},
-							Format: "",
-						},
-					},
 				},
 			},
 		},
@@ -7078,7 +6979,7 @@ func schema_k8sio_api_core_v1_PersistentVolumeStatus(ref common.ReferenceCallbac
 					},
 					"lastPhaseTransitionTime": {
 						SchemaProps: spec.SchemaProps{
-							Description: "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).",
+							Description: "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. This is an alpha field and requires enabling PersistentVolumeLastPhaseTransitionTime feature.",
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -7221,7 +7122,7 @@ func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) comm
 				Properties: map[string]spec.Schema{
 					"labelSelector": {
 						SchemaProps: spec.SchemaProps{
-							Description: "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.",
+							Description: "A label query over a set of resources, in this case pods.",
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
 						},
 					},
@@ -7254,46 +7155,6 @@ func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) comm
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
 						},
 					},
-					"matchLabelKeys": {
-						VendorExtensible: spec.VendorExtensible{
-							Extensions: spec.Extensions{
-								"x-kubernetes-list-type": "atomic",
-							},
-						},
-						SchemaProps: spec.SchemaProps{
-							Description: "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
-							Type: []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: "",
-										Type: []string{"string"},
-										Format: "",
-									},
-								},
-							},
-						},
-					},
-					"mismatchLabelKeys": {
-						VendorExtensible: spec.VendorExtensible{
-							Extensions: spec.Extensions{
-								"x-kubernetes-list-type": "atomic",
-							},
-						},
-						SchemaProps: spec.SchemaProps{
-							Description: "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.",
-							Type: []string{"array"},
-							Items: &spec.SchemaOrArray{
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Default: "",
-										Type: []string{"string"},
-										Format: "",
-									},
-								},
-							},
-						},
-					},
 				},
 				Required: []string{"topologyKey"},
 			},
@@ -7434,12 +7295,14 @@ func schema_k8sio_api_core_v1_PodCondition(ref common.ReferenceCallback) common.
 					"lastProbeTime": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Last time we probed the condition.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
 					"lastTransitionTime": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Last time the condition transitioned from one status to another.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -8457,7 +8320,8 @@ func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenA
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -9027,6 +8891,7 @@ func schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref common.ReferenceCallback)
 					"evictionTime": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Time at which this entry was added to the list.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -9586,6 +9451,7 @@ func schema_k8sio_api_core_v1_ReplicationControllerCondition(ref common.Referenc
 					"lastTransitionTime": {
 						SchemaProps: spec.SchemaProps{
 							Description: "The last time the condition transitioned from one status to another.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -9838,6 +9704,7 @@ func schema_k8sio_api_core_v1_ResourceFieldSelector(ref common.ReferenceCallback
 					"divisor": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Specifies the output format of the exposed resources, defaults to \"1\"",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 						},
 					},
@@ -9971,7 +9838,8 @@ func schema_k8sio_api_core_v1_ResourceQuotaSpec(ref common.ReferenceCallback) co
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -10021,7 +9889,8 @@ func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback)
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -10035,7 +9904,8 @@ func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback)
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -10064,7 +9934,8 @@ func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback)
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -10078,7 +9949,8 @@ func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback)
 								Allows: true,
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 									},
 								},
 							},
@@ -11192,7 +11064,7 @@ func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.O
 					},
 					"appProtocol": {
 						SchemaProps: spec.SchemaProps{
-							Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
+							Description: "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
 							Type: []string{"string"},
 							Format: "",
 						},
@@ -11208,6 +11080,7 @@ func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.O
 					"targetPort": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
 						},
 					},
@@ -11554,28 +11427,6 @@ func schema_k8sio_api_core_v1_SessionAffinityConfig(ref common.ReferenceCallback
 	}
 }
 
-func schema_k8sio_api_core_v1_SleepAction(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Description: "SleepAction describes a \"sleep\" action.",
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"seconds": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Seconds is the number of seconds to sleep.",
-							Default: 0,
-							Type: []string{"integer"},
-							Format: "int64",
-						},
-					},
-				},
-				Required: []string{"seconds"},
-			},
-		},
-	}
-}
-
 func schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -11714,6 +11565,7 @@ func schema_k8sio_api_core_v1_TCPSocketAction(ref common.ReferenceCallback) comm
 					"port": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"),
 						},
 					},
@@ -12414,60 +12266,11 @@ func schema_k8sio_api_core_v1_VolumeProjection(ref common.ReferenceCallback) com
 							Ref: ref("k8s.io/api/core/v1.ServiceAccountTokenProjection"),
 						},
 					},
-					"clusterTrustBundle": {
-						SchemaProps: spec.SchemaProps{
-							Description: "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
-							Ref: ref("k8s.io/api/core/v1.ClusterTrustBundleProjection"),
-						},
-					},
-				},
-			},
-		},
-		Dependencies: []string{
-			"k8s.io/api/core/v1.ClusterTrustBundleProjection", "k8s.io/api/core/v1.ConfigMapProjection", "k8s.io/api/core/v1.DownwardAPIProjection", "k8s.io/api/core/v1.SecretProjection", "k8s.io/api/core/v1.ServiceAccountTokenProjection"},
-	}
-}
-
-func schema_k8sio_api_core_v1_VolumeResourceRequirements(ref common.ReferenceCallback) common.OpenAPIDefinition {
-	return common.OpenAPIDefinition{
-		Schema: spec.Schema{
-			SchemaProps: spec.SchemaProps{
-				Description: "VolumeResourceRequirements describes the storage resource requirements for a volume.",
-				Type: []string{"object"},
-				Properties: map[string]spec.Schema{
-					"limits": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
-							Type: []string{"object"},
-							AdditionalProperties: &spec.SchemaOrBool{
-								Allows: true,
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
-									},
-								},
-							},
-						},
-					},
-					"requests": {
-						SchemaProps: spec.SchemaProps{
-							Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
-							Type: []string{"object"},
-							AdditionalProperties: &spec.SchemaOrBool{
-								Allows: true,
-								Schema: &spec.Schema{
-									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
-									},
-								},
-							},
-						},
-					},
 				},
 			},
 		},
 		Dependencies: []string{
-			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
+			"k8s.io/api/core/v1.ConfigMapProjection", "k8s.io/api/core/v1.DownwardAPIProjection", "k8s.io/api/core/v1.SecretProjection", "k8s.io/api/core/v1.ServiceAccountTokenProjection"},
 	}
 }
 
@@ -13255,6 +13058,7 @@ func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.Open
 					"lastTransitionTime": {
 						SchemaProps: spec.SchemaProps{
 							Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -13810,7 +13614,8 @@ func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDe
 							Items: &spec.SchemaOrArray{
 								Schema: &spec.Schema{
 									SchemaProps: spec.SchemaProps{
-										Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
+										Default: map[string]interface{}{},
+										Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
 									},
 								},
 							},
@@ -14096,6 +13901,7 @@ func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.Ope
 					"creationTimestamp": {
 						SchemaProps: spec.SchemaProps{
 							Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -14877,6 +14683,7 @@ func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenA
 					"object": {
 						SchemaProps: spec.SchemaProps{
 							Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
 						},
 					},
@@ -15075,6 +14882,7 @@ func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.Ope
 					"object": {
 						SchemaProps: spec.SchemaProps{
 							Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
 						},
 					},
diff --git a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/custommetrics/zz_generated.openapi.go b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/custommetrics/zz_generated.openapi.go
index 0f15ec96872..7b973d645c9 100644
--- a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/custommetrics/zz_generated.openapi.go
+++ b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/custommetrics/zz_generated.openapi.go
@@ -120,6 +120,7 @@ func schema_pkg_apis_custom_metrics_v1beta1_MetricValue(ref common.ReferenceCall
 					"timestamp": {
 						SchemaProps: spec.SchemaProps{
 							Description: "indicates the time at which the metrics were produced",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -133,6 +134,7 @@ func schema_pkg_apis_custom_metrics_v1beta1_MetricValue(ref common.ReferenceCall
 					"value": {
 						SchemaProps: spec.SchemaProps{
 							Description: "the value of the metric for this",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 						},
 					},
@@ -309,6 +311,7 @@ func schema_pkg_apis_custom_metrics_v1beta2_MetricValue(ref common.ReferenceCall
 					"timestamp": {
 						SchemaProps: spec.SchemaProps{
 							Description: "indicates the time at which the metrics were produced",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -322,6 +325,7 @@ func schema_pkg_apis_custom_metrics_v1beta2_MetricValue(ref common.ReferenceCall
 					"value": {
 						SchemaProps: spec.SchemaProps{
 							Description: "the value of the metric for this",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 						},
 					},
diff --git a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/externalmetrics/zz_generated.openapi.go b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/externalmetrics/zz_generated.openapi.go
index e66566202e3..e3598d97ae3 100644
--- a/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/externalmetrics/zz_generated.openapi.go
+++ b/vendor/sigs.k8s.io/custom-metrics-apiserver/pkg/generated/openapi/externalmetrics/zz_generated.openapi.go
@@ -83,6 +83,7 @@ func schema_pkg_apis_external_metrics_v1beta1_ExternalMetricValue(ref common.Ref
 					"timestamp": {
 						SchemaProps: spec.SchemaProps{
 							Description: "indicates the time at which the metrics were produced",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
 						},
 					},
@@ -96,6 +97,7 @@ func schema_pkg_apis_external_metrics_v1beta1_ExternalMetricValue(ref common.Ref
 					"value": {
 						SchemaProps: spec.SchemaProps{
 							Description: "the value of the metric",
+							Default: map[string]interface{}{},
 							Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
 						},
 					},