From 7652d42fa159e9988983cc6f725057bcffdf2f1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Jan 2024 16:00:55 +0000 Subject: [PATCH] Bump sigs.k8s.io/node-feature-discovery from 0.14.2 to 0.15.1 Bumps [sigs.k8s.io/node-feature-discovery](https://github.com/kubernetes-sigs/node-feature-discovery) from 0.14.2 to 0.15.1. - [Release notes](https://github.com/kubernetes-sigs/node-feature-discovery/releases) - [Commits](https://github.com/kubernetes-sigs/node-feature-discovery/compare/v0.14.2...v0.15.1) --- updated-dependencies: - dependency-name: sigs.k8s.io/node-feature-discovery dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 59 +- go.sum | 95 +- vendor/github.com/go-logr/logr/README.md | 113 +- vendor/github.com/go-logr/logr/SECURITY.md | 18 + vendor/github.com/go-logr/logr/funcr/funcr.go | 48 +- vendor/github.com/go-logr/logr/logr.go | 35 +- .../go-logr/logr/slogr/sloghandler.go | 168 + vendor/github.com/go-logr/logr/slogr/slogr.go | 108 + .../github.com/go-logr/logr/slogr/slogsink.go | 122 + vendor/github.com/onsi/gomega/CHANGELOG.md | 5 + vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- vendor/github.com/onsi/gomega/matchers.go | 37 +- .../gomega/matchers/match_error_matcher.go | 25 +- vendor/go.opentelemetry.io/otel/.gitignore | 3 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 17 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 70 +- vendor/go.opentelemetry.io/otel/Makefile | 29 +- vendor/go.opentelemetry.io/otel/README.md | 15 +- .../otel/baggage/baggage.go | 4 +- .../otel/internal/global/instruments.go | 60 +- .../otel/internal/global/trace.go | 7 + vendor/go.opentelemetry.io/otel/metric/doc.go | 2 +- .../otel/metric/instrument.go | 23 + .../otel/metric/syncfloat64.go | 10 +- .../otel/metric/syncint64.go | 10 +- .../otel/propagation/trace_context.go | 6 +- .../go.opentelemetry.io/otel/requirements.txt | 2 +- .../go.opentelemetry.io/otel/trace/config.go | 1 + vendor/go.opentelemetry.io/otel/trace/doc.go | 64 + .../otel/trace/embedded/embedded.go | 56 + vendor/go.opentelemetry.io/otel/trace/noop.go | 10 +- .../go.opentelemetry.io/otel/trace/trace.go | 40 +- .../otel/trace/tracestate.go | 38 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 4 +- vendor/golang.org/x/oauth2/deviceauth.go | 198 + vendor/golang.org/x/oauth2/internal/token.go | 70 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sync/errgroup/go120.go | 1 - .../golang.org/x/sync/errgroup/pre_go120.go | 1 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 + vendor/golang.org/x/time/rate/rate.go | 2 + .../appengine/internal/api.go | 347 +- .../appengine/internal/api_classic.go | 29 +- .../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../appengine/internal/transaction.go | 10 +- .../appengine/urlfetch/urlfetch.go | 9 +- vendor/k8s.io/klog/v2/.golangci.yaml | 6 + .../k8s.io/klog/v2/internal/buffer/buffer.go | 12 +- 
vendor/k8s.io/klog/v2/internal/clock/clock.go | 21 +- .../klog/v2/internal/serialize/keyvalues.go | 71 +- .../internal/serialize/keyvalues_no_slog.go | 97 + .../v2/internal/serialize/keyvalues_slog.go | 155 + .../internal/sloghandler/sloghandler_slog.go | 96 + .../doc.go => klog/v2/k8s_references_slog.go} | 31 +- vendor/k8s.io/klog/v2/klog.go | 66 +- vendor/k8s.io/klog/v2/klog_file.go | 4 +- vendor/k8s.io/klog/v2/klogr.go | 46 +- vendor/k8s.io/klog/v2/klogr_slog.go | 96 + .../kube-openapi/pkg/builder3/util/util.go | 51 - .../k8s.io/kube-openapi/pkg/cached/cache.go | 268 +- .../k8s.io/kube-openapi/pkg/common/common.go | 38 - .../kube-openapi/pkg/handler3/handler.go | 77 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 1 + .../kube-openapi/pkg/openapiconv/convert.go | 322 - .../kube-openapi/pkg/schemamutation/walker.go | 519 -- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 21 + .../k8s.io/kube-openapi/pkg/spec3/example.go | 14 + .../pkg/spec3/external_documentation.go | 13 + vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 27 + .../k8s.io/kube-openapi/pkg/spec3/header.go | 31 + .../kube-openapi/pkg/spec3/media_type.go | 20 + .../kube-openapi/pkg/spec3/operation.go | 27 + .../kube-openapi/pkg/spec3/parameter.go | 31 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 47 +- .../kube-openapi/pkg/spec3/request_body.go | 21 + .../k8s.io/kube-openapi/pkg/spec3/response.go | 52 + .../kube-openapi/pkg/spec3/security_scheme.go | 17 + .../k8s.io/kube-openapi/pkg/spec3/server.go | 26 + vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 25 + .../kube-openapi/pkg/validation/spec/fuzz.go | 502 -- vendor/k8s.io/kubernetes/LICENSE | 202 - vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS | 4 - .../pkg/apis/core/annotation_key_constants.go | 145 - .../pkg/apis/core/helper/helpers.go | 502 -- .../k8s.io/kubernetes/pkg/apis/core/json.go | 31 - .../pkg/apis/core/objectreference.go | 37 - .../kubernetes/pkg/apis/core/register.go | 102 - .../kubernetes/pkg/apis/core/resource.go | 58 - .../k8s.io/kubernetes/pkg/apis/core/taint.go | 42 - .../kubernetes/pkg/apis/core/toleration.go | 30 - .../k8s.io/kubernetes/pkg/apis/core/types.go | 6148 ---------------- .../pkg/apis/core/zz_generated.deepcopy.go | 6256 ----------------- vendor/k8s.io/utils/pointer/pointer.go | 283 +- vendor/k8s.io/utils/ptr/OWNERS | 10 + vendor/k8s.io/utils/ptr/README.md | 3 + vendor/k8s.io/utils/ptr/ptr.go | 73 + vendor/modules.txt | 79 +- .../apis/nfd/v1alpha1/annotations_labels.go | 10 + .../pkg/apis/nfd/v1alpha1/expression.go | 480 -- .../pkg/apis/nfd/v1alpha1/feature.go | 18 +- .../pkg/apis/nfd/v1alpha1/rule.go | 268 - .../pkg/apis/nfd/v1alpha1/types.go | 35 +- .../nfd/v1alpha1/zz_generated.deepcopy.go | 96 +- .../node-feature-discovery/pkg/utils/dump.go | 48 - .../node-feature-discovery/pkg/utils/flags.go | 212 - .../pkg/utils/fswatcher.go | 159 - .../pkg/utils/grpc_log.go | 84 - .../pkg/utils/hostpath/hostpath.go | 45 - .../pkg/utils/kubernetes.go | 45 - .../pkg/utils/memory_resources.go | 151 - .../node-feature-discovery/pkg/utils/tls.go | 71 - .../v4/fieldpath/pathelementmap.go | 45 +- .../v4/merge/conflict.go | 2 +- .../structured-merge-diff/v4/merge/update.go | 72 +- .../v4/schema/elements.go | 3 +- .../v4/schema/schemaschema.go | 3 +- .../structured-merge-diff/v4/typed/compare.go | 460 ++ .../structured-merge-diff/v4/typed/helpers.go | 21 +- .../structured-merge-diff/v4/typed/merge.go | 61 +- .../structured-merge-diff/v4/typed/parser.go | 12 +- .../structured-merge-diff/v4/typed/remove.go | 4 +- .../v4/typed/tofieldset.go | 24 +- 
.../structured-merge-diff/v4/typed/typed.go | 187 +- .../structured-merge-diff/v4/typed/union.go | 276 - .../v4/typed/validate.go | 14 +- .../v4/value/mapreflect.go | 2 +- .../v4/value/mapunstructured.go | 8 +- .../v4/value/reflectcache.go | 4 +- 140 files changed, 3738 insertions(+), 18234 deletions(-) create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md create mode 100644 vendor/github.com/go-logr/logr/slogr/sloghandler.go create mode 100644 vendor/github.com/go-logr/logr/slogr/slogr.go create mode 100644 vendor/github.com/go-logr/logr/slogr/slogsink.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go create mode 100644 vendor/golang.org/x/oauth2/pkce.go create mode 100644 vendor/k8s.io/klog/v2/.golangci.yaml create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go create mode 100644 vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go create mode 100644 vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go rename vendor/k8s.io/{kubernetes/pkg/apis/core/doc.go => klog/v2/k8s_references_slog.go} (51%) create mode 100644 vendor/k8s.io/klog/v2/klogr_slog.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go delete mode 100644 vendor/k8s.io/kubernetes/LICENSE delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/json.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/register.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/resource.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/types.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/utils/ptr/OWNERS create mode 100644 vendor/k8s.io/utils/ptr/README.md create mode 100644 vendor/k8s.io/utils/ptr/ptr.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/expression.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/rule.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/dump.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/flags.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/fswatcher.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/grpc_log.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath/hostpath.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/kubernetes.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/memory_resources.go delete mode 100644 vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/tls.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go diff --git a/go.mod b/go.mod 
index 5a61685d9..30441b0dd 100644 --- a/go.mod +++ b/go.mod @@ -39,20 +39,20 @@ require ( github.com/google/uuid v1.4.0 github.com/mittwald/go-helm-client v0.12.3 github.com/onsi/ginkgo/v2 v2.13.0 - github.com/onsi/gomega v1.28.1 + github.com/onsi/gomega v1.29.0 github.com/prometheus/procfs v0.12.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.8.4 github.com/urfave/cli/v2 v2.25.7 - golang.org/x/net v0.18.0 + golang.org/x/net v0.19.0 google.golang.org/grpc v1.59.0 - k8s.io/api v0.28.3 - k8s.io/apiextensions-apiserver v0.28.2 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 - k8s.io/klog/v2 v2.100.1 - k8s.io/kubelet v0.28.2 - sigs.k8s.io/node-feature-discovery v0.14.2 + k8s.io/api v0.29.0 + k8s.io/apiextensions-apiserver v0.29.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 + k8s.io/klog/v2 v2.110.1 + k8s.io/kubelet v0.29.0 + sigs.k8s.io/node-feature-discovery v0.15.1 sigs.k8s.io/yaml v1.4.0 tags.cncf.io/container-device-interface v0.6.2 ) @@ -89,7 +89,7 @@ require ( github.com/frankban/quicktest v1.14.4 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -159,37 +159,36 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.opentelemetry.io/otel v1.20.0 // indirect + go.opentelemetry.io/otel/metric v1.20.0 // indirect + go.opentelemetry.io/otel/trace v1.20.0 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.14.1-0.20231113162313-11eadc05e9bf // indirect - golang.org/x/term v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/oauth2 v0.14.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.14.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.16.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect helm.sh/helm/v3 v3.13.1 // indirect - k8s.io/apiserver v0.28.3 // indirect + k8s.io/apiserver v0.29.0 // indirect k8s.io/cli-runtime v0.28.3 // indirect - k8s.io/component-base v0.28.3 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/kubectl v0.28.2 // indirect - k8s.io/kubernetes v1.28.3 // indirect - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect + k8s.io/component-base 
v0.29.0 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.0 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect oras.land/oras-go v1.2.4 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect ) diff --git a/go.sum b/go.sum index 8886ffd5d..5545217ab 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,8 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= @@ -123,10 +123,9 @@ github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpj github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= @@ -165,6 +164,7 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.2 
h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= @@ -314,8 +314,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= -github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -427,12 +427,12 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc= +go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs= +go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA= +go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM= +go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ= +go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -441,8 +441,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -450,15 +450,14 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -466,11 +465,11 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -478,8 +477,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -501,23 +500,23 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.1-0.20231113162313-11eadc05e9bf h1:deuYF+UqKuU1sIonGbTXrdnTWUeMsYydHZY1w1FGHIg= -golang.org/x/sys v0.14.1-0.20231113162313-11eadc05e9bf/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= -golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -527,21 +526,21 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -595,18 +594,16 @@ k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= 
-k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k= k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE= k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM= k8s.io/kubelet v0.28.3/go.mod h1:E3NHYbp/v45Ao6AD0EOZnqO3L0R6Haks6Nm0+bnFwtU= -k8s.io/kubernetes v1.28.3 h1:XTci6gzk+JR51UZuZQCFJ4CsyUkfivSjLI4O1P9z6LY= -k8s.io/kubernetes v1.28.3/go.mod h1:NhAysZWvHtNcJFFHic87ofxQN7loylCQwg3ZvXVDbag= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= @@ -615,10 +612,10 @@ sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKU sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= -sigs.k8s.io/node-feature-discovery v0.14.2 h1:NBlNg93rf8Ad5USYTDhPMkr9oLp2pd84vXZDX5hhJxs= -sigs.k8s.io/node-feature-discovery v0.14.2/go.mod h1:0Qk0F7mLlKLoJbu8W8LFFoQXqp07aPq3R5w/C4LUtpc= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/node-feature-discovery v0.15.1 h1:d+VEgp1qXQhO7GPOv1dHBnJWi5liky6KSMrv+oKVBeg= +sigs.k8s.io/node-feature-discovery v0.15.1/go.mod h1:trXFyMt5uQJwxa/UGqgyCKHDYC12thkqmt71tjII/SI= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ab5931181..a8c29bfbd 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -1,6 +1,7 @@ # A minimal logging API for Go [![Go 
Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) logr offers an(other) opinion on how Go programs and libraries can do logging without becoming coupled to a particular logging implementation. This is not @@ -73,6 +74,29 @@ received: If the Go standard library had defined an interface for logging, this project probably would not be needed. Alas, here we are. +When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) +package. + ### Inspiration Before you consider this package, please read [this blog post by the @@ -118,6 +142,91 @@ There are implementations for the following logging libraries: - **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) - **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) +## slog interoperability + +Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` +and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and +`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level +slog API. `slogr` itself leaves that to the caller. + +## Using a `logr.Sink` as backend for slog + +Ideally, a logr sink implementation should support both logr and slog by +implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +of a conflict in the parameters of the common `Enabled` method, it is [not +possible to implement both slog.Handler and logr.Sink in the same +type](https://github.com/golang/go/issues/59110). + +If both are supported, log calls can go from the high-level APIs to the backend +without the need to convert parameters. 
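+For illustration, here is a minimal sketch of both conversion directions (the
+`demo` function name, handler choice, and log calls are illustrative, and the
+imports `log/slog`, `os`, and `github.com/go-logr/logr/slogr` are assumed):
+
+    func demo() {
+        // slog.Handler -> logr.Logger
+        logger := slogr.NewLogr(slog.NewTextHandler(os.Stderr, nil))
+        logger.Info("logr call, slog backend", "key", 42)
+
+        // logr.Logger -> slog.Handler -> slog.Logger, same backend
+        slogLogger := slog.New(slogr.NewSlogHandler(logger))
+        slogLogger.Info("slog call, same backend", "key", 42)
+    }
+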
`NewLogr` and `NewSlogHandler` can +convert back and forth without adding additional wrappers, with one exception: +when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then +`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +log calls. + +Such an implementation should also support values that implement specific +interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`, +`slog.GroupValue`). logr does not convert those. + +Not supporting slog has several drawbacks: +- Recording source code locations works correctly if the handler gets called + through `slog.Logger`, but may be wrong in other cases. That's because a + `logr.Sink` does its own stack unwinding instead of using the program counter + provided by the high-level API. +- slog levels <= 0 can be mapped to logr levels by negating the level without a + loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as + used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink + because logr does not support "more important than info" levels. +- The slog group concept is supported by prefixing each key in a key/value + pair with the group names, separated by a dot. For structured output like + JSON it would be better to group the key/value pairs inside an object. +- Special slog values and interfaces don't work as expected. +- The overhead is likely to be higher. + +These drawbacks are severe enough that applications using a mixture of slog and +logr should switch to a different backend. + +## Using a `slog.Handler` as backend for logr + +Using a plain `slog.Handler` without support for logr works better than the +other direction: +- All logr verbosity levels can be mapped 1:1 to their corresponding slog level + by negating them. +- Stack unwinding is done by the `slogr.SlogSink` and the resulting program + counter is passed to the `slog.Handler`. +- Names added via `Logger.WithName` are gathered and recorded in an additional + attribute with `logger` as key and the names separated by slash as value. +- `Logger.Error` is turned into a log record with `slog.LevelError` as level + and an additional attribute with `err` as key, if an error was provided. + +The main drawback is that `logr.Marshaler` will not be supported. Types should +ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility +with logr implementations without slog support is not important, then +`slog.Valuer` is sufficient. + +## Context support for slog + +Storing a logger in a `context.Context` is not supported by +slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this +to fill this gap: + + func HandlerFromContext(ctx context.Context) slog.Handler { + logger, err := logr.FromContext(ctx) + if err == nil { + return slogr.NewSlogHandler(logger) + } + return slog.Default().Handler() + } + + func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { + return logr.NewContext(ctx, slogr.NewLogr(handler)) + } + +The downside is that storing and retrieving a `slog.Handler` needs more +allocations compared to using a `logr.Logger`. Therefore the recommendation is +to use the `logr.Logger` API in code which uses contextual logging. + ## FAQ ### Conceptual @@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this", Then gradually choose levels in between as you need them, working your way down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs.) +info-type logs). 
For reference, slog pre-defines -4 for debug logs +(corresponds to 4 in logr), which matches what is +[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). #### How do I choose my keys? diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 000000000..1ca756fc7 --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index e52f0cd01..12e5807cc 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -116,17 +116,17 @@ type Options struct { // Equivalent hooks are offered for key-value pairs saved via // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} + RenderBuiltinsHook func(kvList []any) []any // RenderValuesHook is the same as RenderBuiltinsHook, except that it is // only called for key-value pairs saved via logr.Logger.WithValues. See // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} + RenderValuesHook func(kvList []any) []any // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only // called for key-value pairs passed directly to Info and Error. See // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} + RenderArgsHook func(kvList []any) []any // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct // that contains a struct, etc.) it may log. 
Every time it finds a struct, @@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink { return &l } -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { l.Formatter.AddValues(kvList) return &l } @@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink { return &l } -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { +func (l fnlogger) Info(level int, msg string, kvList ...any) { prefix, args := l.FormatInfo(level, msg, kvList) l.write(prefix, args) } -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { +func (l fnlogger) Error(err error, msg string, kvList ...any) { prefix, args := l.FormatError(err, msg, kvList) l.write(prefix, args) } @@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { type Formatter struct { outputFormat outputFormat prefix string - values []interface{} + values []any valuesStr string depth int opts *Options @@ -246,10 +246,10 @@ const ( ) // PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} +type PseudoStruct []any // render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { +func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { @@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string { // This function returns a potentially modified version of kvList, which // ensures that there is a value for every key (adding a value if needed) and // that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { // This logic overlaps with sanitize() but saves one type-cast per key, // which can be measurable. if len(kvList)%2 != 0 { @@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b return kvList } -func (f Formatter) pretty(value interface{}) string { +func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -343,7 +343,7 @@ const ( ) // TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { +func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { if depth > f.opts.MaxLogDepth { return `"<max-log-depth-exceeded>"` } @@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool { return false } -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { +func invokeMarshaler(m logr.Marshaler) (ret any) { defer func() { if r := recover(); r != nil { ret = fmt.Sprintf("<panic: %s>", r) } @@ -675,12 +675,12 @@ func (f Formatter) caller() Caller { const noValue = "<no-value>" -func (f Formatter) nonStringKey(v interface{}) string { +func (f Formatter) nonStringKey(v any) string { return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) } // snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v interface{}) string { +func (f Formatter) snippet(v any) string { const snipLen = 16 snip := f.pretty(v) @@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string { // sanitize ensures that a list of key-value pairs has a value for every key // (adding a value if needed) and that each key is a string (substituting a key // if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { +func (f Formatter) sanitize(kvList []any) []any { if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } @@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int { // FormatInfo renders an Info log message into strings. The prefix will be // empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref } // FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is +// empty when no names were set (via AddNames), or when the output is // configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf prefix = f.prefix if f.outputFormat == outputJSON { args = append(args, "logger", prefix) @@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre args = append(args, "caller", f.caller()) } args = append(args, "msg", msg) - var loggableErr interface{} + var loggableErr any if err != nil { loggableErr = err.Error() } args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) + return prefix, f.render(args, kvList) } // AddName appends the specified name. funcr uses '/' characters to separate @@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) { // AddValues adds key-value pairs to the set of saved values to be logged with // each log line. -func (f *Formatter) AddValues(kvList []interface{}) { +func (f *Formatter) AddValues(kvList []any) { // Three slice args forces a copy. n := len(f.values) f.values = append(f.values[:n:n], kvList...) diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index e027aea3f..2a5075a18 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -127,9 +127,9 @@ limitations under the License. // such a value can call its methods without having to check whether the // instance is ready for use. // -// Calling methods with the null logger (Logger{}) as instance will crash -// because it has no LogSink. Therefore this null logger should never be passed -// around. For cases where passing a logger is optional, a pointer to Logger +// The zero logger (= Logger{}) is identical to Discard() and discards all log +// entries. 
Code that receives a Logger by value can simply call it, the methods +// will never crash. For cases where passing a logger is optional, a pointer to Logger // should be used. // // # Key Naming Conventions @@ -258,6 +258,12 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. return l.sink != nil && l.sink.Enabled(l.level) } @@ -267,11 +273,11 @@ func (l Logger) Enabled() bool { // line. The key/value pairs can then be used to add additional variable // information. The key/value pairs must alternate string keys and arbitrary // values. -func (l Logger) Info(msg string, keysAndValues ...interface{}) { +func (l Logger) Info(msg string, keysAndValues ...any) { if l.sink == nil { return } - if l.Enabled() { + if l.sink.Enabled(l.level) { // see comment in Enabled if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // while the err argument should be used to attach the actual error that // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. -func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { +func (l Logger) Error(err error, msg string, keysAndValues ...any) { if l.sink == nil { return } @@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger { return l } +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. -func (l Logger) WithValues(keysAndValues ...interface{}) Logger { +func (l Logger) WithValues(keysAndValues ...any) Logger { if l.sink == nil { return l } @@ -467,15 +480,15 @@ type LogSink interface { // The level argument is provided for optional logging. This method will // only be called when Enabled(level) is true. See Logger.Info for more // details. - Info(level int, msg string, keysAndValues ...interface{}) + Info(level int, msg string, keysAndValues ...any) // Error logs an error, with the given message and key/value pairs as // context. See Logger.Error for more details. - Error(err error, msg string, keysAndValues ...interface{}) + Error(err error, msg string, keysAndValues ...any) // WithValues returns a new LogSink with additional key/value pairs. See // Logger.WithValues for more details. - WithValues(keysAndValues ...interface{}) LogSink + WithValues(keysAndValues ...any) LogSink // WithName returns a new LogSink with the specified name appended. See // Logger.WithName for more details. @@ -546,5 +559,5 @@ type Marshaler interface { // with exported fields // // It may return any value of any type. 
- MarshalLog() interface{} + MarshalLog() any } diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/slogr/sloghandler.go new file mode 100644 index 000000000..ec6725ce2 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/sloghandler.go @@ -0,0 +1,168 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink logr.LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because NewLogr needs +// access to the original sink without this adjustment. 
A second copy would +// work, but then WithAttrs would have to be called for both of them. +func (l *slogHandler) sinkWithCallDepth() logr.LogSink { + if sink, ok := l.sink.(logr.CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithAttrs(attrs) + copy.sink = copy.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + } + copy.sink = l.sink.WithValues(kvList...) + } + return &copy +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithGroup(name) + copy.sink = l.slogSink + } else { + copy.groupPrefix = copy.addGroupPrefix(name) + } + return &copy +} + +func (l *slogHandler) addGroupPrefix(name string) string { + if l.groupPrefix == "" { + return name + } + return l.groupPrefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a logr.LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original logr.Logger had a V level + if result < 0 { + result = 0 // because logr.LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 000000000..eb519ae23 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,108 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug.
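+//
+// A hypothetical usage sketch (the text handler, os.Stderr, and the level
+// option are illustrative assumptions, not part of this package):
+//
+//	logger := slogr.NewLogr(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
+//	logger.Info("hello")        // emitted at slog.LevelInfo
+//	logger.V(4).Info("details") // emitted at slog.LevelDebug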
+func NewLogr(handler slog.Handler) logr.Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return logr.Discard() + } + return logr.New(handler.sink).V(int(handler.levelBias)) + } + return logr.New(&slogSink{handler: handler}) +} + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the logr.Logger: +// +// logger := +// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the logr.Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handle negative levels: +// +// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(NewSlogHandler(logger)).Warn(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func NewSlogHandler(logger logr.Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to better +// support logging through the slog.Logger or slog.Handler APIs. It should +// then also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (logr.Logger and slog.Logger/Handler) are then supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in NewLogr +// and NewSlogHandler. +type SlogSink interface { + logr.LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogr/slogsink.go new file mode 100644 index 000000000..6fbac561d --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogsink.go @@ -0,0 +1,122 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + "runtime" + "time" + + "github.com/go-logr/logr" +) + +var ( + _ logr.LogSink = &slogSink{} + _ logr.CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewLogr. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) + l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) logr.LogSink { + if l.name != "" { + l.name = l.name + "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) 
+ attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 4f512a435..4fc45f29c 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,8 @@ +## 1.29.0 + +### Features +- MatchError can now take an optional func(error) bool + description [2b39142] + ## 1.28.1 ### Maintenance diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 0625053ef..ba082146a 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.28.1" +const GOMEGA_VERSION = "1.29.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 88f100432..cd3f431d2 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -88,19 +88,44 @@ func Succeed() types.GomegaMatcher { } // MatchError succeeds if actual is a non-nil error that matches the passed in -// string, error, or matcher. +// string, error, function, or matcher. // // These are valid use-cases: // -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) -// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that err.Error() contains substring "sprocket not found" +// When passed a string: +// +// Expect(err).To(MatchError("an error")) +// +// asserts that err.Error() == "an error" +// +// When passed an error: +// +// Expect(err).To(MatchError(SomeError)) +// +// First checks if errors.Is(err, SomeError). +// If that fails then it checks if reflect.DeepEqual(err, SomeError) repeatedly for err and any errors wrapped by err +// +// When passed a matcher: +// +// Expect(err).To(MatchError(ContainSubstring("sprocket not found"))) +// +// the matcher is passed err.Error(). In this case it asserts that err.Error() contains substring "sprocket not found" +// +// When passed a func(err) bool and a description: +// +// Expect(err).To(MatchError(os.IsNotExist, "IsNotExist")) +// +// the function is passed err and matches if the return value is true. The description is required to allow Gomega +// to print a useful error message. // // It is an error for err to be nil or an object that does not implement the // Error interface -func MatchError(expected interface{}) types.GomegaMatcher { +// +// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases. 
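+//
+// For example, a custom predicate can also be supplied as a closure (a
+// hypothetical sketch; ErrNotFound is an assumed sentinel error and errors
+// is the standard library package, neither is part of Gomega):
+//
+//	isNotFound := func(err error) bool { return errors.Is(err, ErrNotFound) }
+//	Expect(err).To(MatchError(isNotFound, "isNotFound"))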
+func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ - Expected: expected, + Expected: expected, + FuncErrDescription: functionErrorDescription, } } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index 827475ea5..c539dd389 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -9,10 +9,14 @@ import ( ) type MatchErrorMatcher struct { - Expected interface{} + Expected any + FuncErrDescription []any + isFunc bool } -func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) { + matcher.isFunc = false + if isNil(actual) { return false, fmt.Errorf("Expected an error, got nil") } @@ -42,6 +46,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e return actualErr.Error() == expected, nil } + v := reflect.ValueOf(expected) + t := v.Type() + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if t.Kind() == reflect.Func && t.NumIn() == 1 && t.In(0).Implements(errorInterface) && t.NumOut() == 1 && t.Out(0).Kind() == reflect.Bool { + if len(matcher.FuncErrDescription) == 0 { + return false, fmt.Errorf("MatchError requires an additional description when passed a function") + } + matcher.isFunc = true + return v.Call([]reflect.Value{reflect.ValueOf(actualErr)})[0].Bool(), nil + } + var subMatcher omegaMatcher var hasSubMatcher bool if expected != nil { @@ -57,9 +72,15 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e } func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { + if matcher.isFunc { + return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0])) + } return format.Message(actual, "to match error", matcher.Expected) } func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { + if matcher.isFunc { + return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0])) + } return format.Message(actual, "not to match error", matcher.Expected) } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index f3355c852..924805565 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -18,8 +18,9 @@ gen/ /example/fib/traces.txt /example/jaeger/jaeger /example/namedtracer/namedtracer +/example/otel-collector/otel-collector /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus +/example/view/view /example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6e8eeec00..a62511f38 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -12,8 +12,9 @@ linters: - depguard - errcheck - godot - - gofmt + - gofumpt - goimports + - gosec - gosimple - govet - ineffassign @@ -53,6 +54,20 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive + # It's okay to not run gosec in a test. 
+ - path: _test\.go + linters: + - gosec + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - text: "G404:" + linters: + - gosec + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - text: "G402: TLS MinVersion too low." + linters: + - gosec include: # revive exported should have comment or be unexported. - EXC0012 diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 3e5c35b5d..c4e7ad475 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,71 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.20.0/0.43.0] 2023-11-10 + +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. + +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) +- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`.
(#4693) + +### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) + +### Fixed + +- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace.
(#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699) +- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) + ## [1.19.0/0.42.0/0.0.7] 2023-09-28 This release contains the first stable release of the OpenTelemetry Go [metric SDK]. @@ -2656,7 +2721,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.20.0...HEAD +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 @@ -2731,7 +2797,7 @@ It contains api and sdk for trace and meter. [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 -[Go 1.19]: https://go.dev/doc/go1.19 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 5c311706b..35fc18996 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + .PHONY: tools tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) @@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt +# Adding a directory will include all benchmarks in that directory if a filter is not specified. +BENCHMARK_TARGETS := sdk/trace +.PHONY: benchmark +benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) +BENCHMARK_FILTER = . +# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark/%: + @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + && cd $* \ + $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint @@ -216,7 +231,7 @@ go-mod-tidy/%: | crosslink lint-modules: go-mod-tidy .PHONY: lint -lint: misspell lint-modules golangci-lint +lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: | $(PORTO) @@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO) misspell: | $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: | $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + .PHONY: codespell codespell: | $(CODESPELL) @$(DOCKERPY) $(CODESPELL) @@ -289,3 +312,7 @@ COMMIT ?= "HEAD" add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 634326ef8..2c5b0cc28 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s ## Project Status -| Signal | Status | Project | -|---------|------------|-----------------------| -| Traces | Stable | N/A | -| Metrics | Mixed [1] | [Go: Metric SDK (GA)] | -| Logs | Frozen [2] | N/A | +| Signal | Status | +|---------|------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Design [1] | -[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34 - -- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta. -- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK. +- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 9e6b3b7b5..84532cb1d 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - decodedValue, err := url.QueryUnescape(value) + decodedValue, err := url.PathUnescape(value) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) { // when converting the header into a data structure." 
key = strings.TrimSpace(k) var err error - value, err = url.QueryUnescape(strings.TrimSpace(v)) + value, err = url.PathUnescape(strings.TrimSpace(v)) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", err, value) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index a33eded87..ebb13c206 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -34,11 +34,13 @@ type afCounter struct { name string opts []metric.Float64ObservableCounterOption - delegate atomic.Value //metric.Float64ObservableCounter + delegate atomic.Value // metric.Float64ObservableCounter } -var _ unwrapper = (*afCounter)(nil) -var _ metric.Float64ObservableCounter = (*afCounter)(nil) +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) func (i *afCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableCounter(i.name, i.opts...) @@ -63,11 +65,13 @@ type afUpDownCounter struct { name string opts []metric.Float64ObservableUpDownCounterOption - delegate atomic.Value //metric.Float64ObservableUpDownCounter + delegate atomic.Value // metric.Float64ObservableUpDownCounter } -var _ unwrapper = (*afUpDownCounter)(nil) -var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) func (i *afUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) @@ -92,11 +96,13 @@ type afGauge struct { name string opts []metric.Float64ObservableGaugeOption - delegate atomic.Value //metric.Float64ObservableGauge + delegate atomic.Value // metric.Float64ObservableGauge } -var _ unwrapper = (*afGauge)(nil) -var _ metric.Float64ObservableGauge = (*afGauge)(nil) +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) func (i *afGauge) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableGauge(i.name, i.opts...) @@ -121,11 +127,13 @@ type aiCounter struct { name string opts []metric.Int64ObservableCounterOption - delegate atomic.Value //metric.Int64ObservableCounter + delegate atomic.Value // metric.Int64ObservableCounter } -var _ unwrapper = (*aiCounter)(nil) -var _ metric.Int64ObservableCounter = (*aiCounter)(nil) +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) func (i *aiCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableCounter(i.name, i.opts...) @@ -150,11 +158,13 @@ type aiUpDownCounter struct { name string opts []metric.Int64ObservableUpDownCounterOption - delegate atomic.Value //metric.Int64ObservableUpDownCounter + delegate atomic.Value // metric.Int64ObservableUpDownCounter } -var _ unwrapper = (*aiUpDownCounter)(nil) -var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) 
@@ -179,11 +189,13 @@ type aiGauge struct { name string opts []metric.Int64ObservableGaugeOption - delegate atomic.Value //metric.Int64ObservableGauge + delegate atomic.Value // metric.Int64ObservableGauge } -var _ unwrapper = (*aiGauge)(nil) -var _ metric.Int64ObservableGauge = (*aiGauge)(nil) +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) func (i *aiGauge) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableGauge(i.name, i.opts...) @@ -208,7 +220,7 @@ type sfCounter struct { name string opts []metric.Float64CounterOption - delegate atomic.Value //metric.Float64Counter + delegate atomic.Value // metric.Float64Counter } var _ metric.Float64Counter = (*sfCounter)(nil) @@ -234,7 +246,7 @@ type sfUpDownCounter struct { name string opts []metric.Float64UpDownCounterOption - delegate atomic.Value //metric.Float64UpDownCounter + delegate atomic.Value // metric.Float64UpDownCounter } var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) @@ -260,7 +272,7 @@ type sfHistogram struct { name string opts []metric.Float64HistogramOption - delegate atomic.Value //metric.Float64Histogram + delegate atomic.Value // metric.Float64Histogram } var _ metric.Float64Histogram = (*sfHistogram)(nil) @@ -286,7 +298,7 @@ type siCounter struct { name string opts []metric.Int64CounterOption - delegate atomic.Value //metric.Int64Counter + delegate atomic.Value // metric.Int64Counter } var _ metric.Int64Counter = (*siCounter)(nil) @@ -312,7 +324,7 @@ type siUpDownCounter struct { name string opts []metric.Int64UpDownCounterOption - delegate atomic.Value //metric.Int64UpDownCounter + delegate atomic.Value // metric.Int64UpDownCounter } var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) @@ -338,7 +350,7 @@ type siHistogram struct { name string opts []metric.Int64HistogramOption - delegate atomic.Value //metric.Int64Histogram + delegate atomic.Value // metric.Int64Histogram } var _ metric.Int64Histogram = (*siHistogram)(nil) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 5f008d098..3f61ec12a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -39,6 +39,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" ) // tracerProvider is a placeholder for a configured SDK TracerProvider. @@ -46,6 +47,8 @@ import ( // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { + embedded.TracerProvider + mtx sync.Mutex tracers map[il]*tracer delegate trace.TracerProvider @@ -119,6 +122,8 @@ type il struct { // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. type tracer struct { + embedded.Tracer + name string opts []trace.TracerOption provider *tracerProvider @@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // SpanContext. It performs no operations other than to return the wrapped // SpanContext. 
type nonRecordingSpan struct { + embedded.Span + sc trace.SpanContext tracer *tracer } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index ae24e448d..54716e13b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric]. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. For example, -an author who want to default to silently dropping the call can use +an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/metric/noop]: import "go.opentelemetry.io/otel/metric/noop" diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index cdca00058..be89cd533 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -39,6 +39,12 @@ type InstrumentOption interface { Float64ObservableGaugeOption } +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + type descOpt string func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { @@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. func WithUnit(u string) InstrumentOption { return unitOpt(u) } +// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. +// +// This option is considered "advisory", and may be ignored by API implementations. +func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } + +type bucketOpt []float64 + +func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + +func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.explicitBucketBoundaries = o + return c +} + // AddOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as an AddOption. type AddOption interface { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index f0b063721..0a4825ae6 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -147,8 +147,9 @@ type Float64Histogram interface { // Float64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Float64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all @@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Float64HistogramOption applies options to a [Float64HistogramConfig]. See // [InstrumentOption] for other options that can be used as a // Float64HistogramOption. 
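For callers of this vendored module, the new histogram option is applied at instrument creation time. A minimal sketch, assuming an already-configured metric.Meter named meter and a context.Context named ctx (both hypothetical; only options shown in this diff are used):

	latencies, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("s"),
		// Advisory option added in this release; implementations may ignore it.
		metric.WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
	)
	if err != nil {
		panic(err) // placeholder error handling for the sketch
	}
	latencies.Record(ctx, 0.25)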
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 6f508eb66..56667d32f 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -147,8 +147,9 @@ type Int64Histogram interface { // Int64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Int64HistogramConfig struct { - description string - unit string + description string + unit string + explicitBucketBoundaries []float64 } // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts @@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string { return c.unit } +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + // Int64HistogramOption applies options to a [Int64HistogramConfig]. See // [InstrumentOption] for other options that can be used as an // Int64HistogramOption. diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 902692da0..75a8f3435 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -40,8 +40,10 @@ const ( // their proprietary information. type TraceContext struct{} -var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +var ( + _ TextMapPropagator = TraceContext{} + traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") +) // Inject set tracecontext from the Context into the carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index ddff45468..e0a43e138 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.5 +codespell==2.2.6 diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index cb3efbb9a..3aadc66cf 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index ab0346f96..440f3d756 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -62,5 +62,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. 
It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 000000000..898db5a75 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 7cf6c7f3e..c125491ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -19,16 +19,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { } // noopTracer is an implementation of Tracer that performs no operations. -type noopTracer struct{} +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption } // noopSpan is an implementation of Span that performs no operations. -type noopSpan struct{} +type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 4aa94f79f..26a4b2260 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +49,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. 
type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. @@ -71,8 +74,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -486,8 +498,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -518,8 +537,15 @@ type Tracer interface { // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. 
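Together with the deprecation of trace.NewNoopTracerProvider above, consumers of this vendored module would migrate roughly as follows; a sketch, assuming go.opentelemetry.io/otel/trace/noop is imported as noop:

	// Before (deprecated in this release):
	var tp trace.TracerProvider = trace.NewNoopTracerProvider()

	// After: the dedicated no-op implementation added by this bump.
	tp = noop.NewTracerProvider()
	_ = tp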
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index ca68a82e5..d1e47ca2f 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -28,9 +28,9 @@ const ( // based on the W3C Trace Context specification, see // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` + withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -40,9 +40,10 @@ const ( ) var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) + noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) + withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) + valueRe = regexp.MustCompile(`^` + valueFormat + `$`) + memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ) type member struct { @@ -51,10 +52,19 @@ type member struct { } func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { + if len(key) > 256 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } - if !valueRe.MatchString(value) { + if !noTenantKeyRe.MatchString(key) { + if !withTenantKeyRe.MatchString(key) { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + atIndex := strings.LastIndex(key, "@") + if atIndex > 241 || len(key)-1-atIndex > 14 { + return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + } + } + if len(value) > 256 || !valueRe.MatchString(value) { return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) } return member{Key: key, Value: value}, nil @@ -62,14 +72,14 @@ func newMember(key, value string) (member, error) { func parseMember(m string) (member, error) { matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + if len(matches) != 3 { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + result, e := newMember(matches[1], matches[2]) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil } // String encodes member into a string compliant with the W3C Trace Context diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ad64e1996..5a92f1d4b 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.19.0" + return "1.20.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 7d2127692..82366e799 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.19.0 + version: v1.20.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -35,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.42.0 + version: v0.43.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
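+//
+// A sketch of the resulting RFC 8628 flow (cfg and ctx are assumed to exist,
+// and error handling is elided):
+//
+//	da, err := cfg.DeviceAuth(ctx)
+//	// Show da.VerificationURI and da.UserCode to the user, then poll:
+//	tok, err := cfg.DeviceAccessToken(ctx, da)
+//	_ = tok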
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. 
+ case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda5..e83ddeef0 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. 
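// LazyAuthStyleCache above exists because Config values have historically
// been copied by value, so they cannot grow a sync.Mutex, and
// atomic.Pointer would raise the minimum supported Go version. The same
// lazy-init idiom, reduced to a self-contained sketch (the cached type
// here is arbitrary):
package main

import (
	"fmt"
	"sync/atomic"
)

type cache struct{ m map[string]int }

type lazyCache struct {
	v atomic.Value // holds *cache, left nil until first use
}

func (lc *lazyCache) get() *cache {
	if c, ok := lc.v.Load().(*cache); ok {
		return c
	}
	c := &cache{m: map[string]int{}}
	// CompareAndSwap publishes exactly one instance even when several
	// goroutines race here; losers re-load the winner's pointer.
	if !lc.v.CompareAndSwap(nil, c) {
		c = lc.v.Load().(*cache)
	}
	return c
}

func main() {
	var lc lazyCache
	fmt.Println(lc.get() == lc.get()) // true: every caller sees one cache
}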
-func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe3..90a2c3d6d 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. 
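// With Endpoint gaining DeviceAuthURL, the RFC 8628 helpers compose into a
// complete device flow. A hedged end-to-end sketch; the client ID,
// endpoint URLs, and scope are placeholders, not a real provider:
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "my-client-id", // placeholder
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://auth.example.com/device/code", // placeholder
			TokenURL:      "https://auth.example.com/token",       // placeholder
		},
		Scopes: []string{"profile"}, // placeholder
	}

	ctx := context.Background()
	da, err := conf.DeviceAuth(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Printf("visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// DeviceAccessToken blocks, polling every da.Interval seconds (5 by
	// default) and adding 5s on slow_down, until approval, denial, or
	// da.Expiry cancels the context.
	tok, err := conf.DeviceAccessToken(ctx, da)
	if err != nil {
		panic(err)
	}
	fmt.Println("access token:", tok.AccessToken)
}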
+// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. 
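// The PKCE helpers slot into the authorization-code flow exactly as the
// revised AuthCodeURL/Exchange docs above describe: one verifier per
// authorization, its S256 challenge in the first leg, the verifier itself
// in the second. A sketch with placeholder endpoints and code:
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "my-client-id", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // placeholder
			TokenURL: "https://auth.example.com/token",     // placeholder
		},
		RedirectURL: "https://app.example.com/callback", // placeholder
	}

	verifier := oauth2.GenerateVerifier()
	fmt.Println("visit:", conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier)))

	// Once the redirect delivers ?code=..., redeem it with the verifier;
	// the server recomputes the S256 challenge and compares.
	code := "code-from-callback" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	fmt.Println(tok, err)
}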
+func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5ffce9764..5bbb33217 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go index 7d419d376..f93c740b6 100644 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.20 -// +build go1.20 package errgroup diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go index 1795c18ac..88ce33434 100644 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.20 -// +build !go1.20 package errgroup diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c0851..dbe680eab 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 5dda44503..6202638ba 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -519,6 +519,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 1781cfca3..c73cfe2f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1801,6 +1801,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index b07276a1b..bbf8399ff 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3005,6 +3005,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3cb..8f6c7f493 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20..0569f5dd4 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
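// LOOP_CONFIGURE (Linux 5.8+) collapses the old LOOP_SET_FD +
// LOOP_SET_STATUS64 two-step into one ioctl, which the new
// IoctlLoopConfigure wrapper and LoopConfig type expose. A sketch of
// attaching a backing file with it; the paths are examples, the Info field
// names follow the generated ztypes above, and this needs root on a 5.8+
// kernel:
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	backing, err := os.Open("/tmp/disk.img") // example backing file
	if err != nil {
		panic(err)
	}
	defer backing.Close()

	loop, err := os.OpenFile("/dev/loop0", os.O_RDWR, 0) // example device
	if err != nil {
		panic(err)
	}
	defer loop.Close()

	cfg := unix.LoopConfig{Fd: uint32(backing.Fd())}
	copy(cfg.Info.File_name[:], "/tmp/disk.img")

	// One syscall sets the fd and status together, instead of two.
	if err := unix.IoctlLoopConfigure(int(loop.Fd()), &cfg); err != nil {
		panic(err)
	}
	fmt.Println("loop device configured")
}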
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
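// The rewrite above turns App Engine's fixed handleHTTP entry point into
// ordinary nested http.Handler middleware, so the same request plumbing
// can wrap any handler. The composition pattern in miniature, with
// stand-in logging/recovery stages rather than the real GAE ones:
package main

import (
	"log"
	"net/http"
)

func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Println("->", r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func recovering(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if p := recover(); p != nil {
				http.Error(w, "internal error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

// middleware composes stages the same way Middleware chains
// handleHTTPMiddleware(executeRequestSafelyMiddleware(next)).
func middleware(next http.Handler) http.Handler {
	return logging(recovering(next))
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) })
	log.Fatal(http.ListenAndServe(":8080", middleware(http.DefaultServeMux)))
}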
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
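// jointContext takes Deadline/Done/Err from its base context but lets
// Value fall through to the request context, which is how WithContext
// merges a caller's context with the request's API values. The same
// layering, as a self-contained sketch:
package main

import (
	"context"
	"fmt"
	"time"
)

type joined struct {
	base, valuesOnly context.Context
}

var _ context.Context = joined{} // compile-time interface check

func (c joined) Deadline() (time.Time, bool) { return c.base.Deadline() }
func (c joined) Done() <-chan struct{}       { return c.base.Done() }
func (c joined) Err() error                  { return c.base.Err() }
func (c joined) Value(key any) any {
	if v := c.base.Value(key); v != nil {
		return v
	}
	return c.valuesOnly.Value(key)
}

func main() {
	type key string
	base, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := context.WithValue(context.Background(), key("user"), "alice")

	ctx := joined{base: base, valuesOnly: req}
	_, hasDeadline := ctx.Deadline()
	fmt.Println(hasDeadline, ctx.Value(key("user"))) // true alice
}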
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e3..87c33c798 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b..5b95c13d9 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
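// The new ctxKey string type gives the API_HOST/API_PORT overrides
// printable, collision-proof context keys, while the older keys in this
// file keep the take-the-address-of-a-string trick. Both idioms, side by
// side in a sketch:
package main

import (
	"context"
	"fmt"
)

// A defined key type: ctxKey("api-host") can never collide with a plain
// string key from another package, because the types differ.
type ctxKey string

// A pointer key: identity is the variable's address, so only code that
// can see the variable can construct the key.
var legacyKey = "holds something"

func main() {
	ctx := context.WithValue(context.Background(), ctxKey("api-host"), "localhost")
	ctx = context.WithValue(ctx, &legacyKey, "value")
	fmt.Println(ctx.Value(ctxKey("api-host")), ctx.Value(&legacyKey))
}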
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e42..0f95aa91d 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e..5ad3548bf 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e..4201b6b58 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d8067263..18ddda3a4 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312f..afd0ae84f 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333..86a8caf06 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae653..2ae8ab9fa 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d9..6c0d72418 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
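// Main above now serves Middleware(http.DefaultServeMux) instead of the
// old handleHTTP function, so an app keeps the same shape it always had:
// register handlers, then hand control to appengine.Main, which delegates
// to this internal Main. A minimal app using that entry point:
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from App Engine")
	})
	// Serves via Middleware(http.DefaultServeMux) under the hood.
	appengine.Main()
}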
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 000000000..0d77d65f0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index f325ded5e..46de00fb0 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -30,14 +30,16 @@ import ( var ( // Pid is inserted into log headers. Can be overridden for tests. Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time ) // Buffer holds a single byte.Buffer for reuse. The zero value is ready for // use. It also provides some helper methods for output formatting. type Buffer struct { bytes.Buffer - Tmp [64]byte // temporary byte array for creating headers. - next *Buffer + Tmp [64]byte // temporary byte array for creating headers. } var buffers = sync.Pool{ @@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] @@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go index b8b6af5c8..cc11bb480 100644 --- a/vendor/k8s.io/klog/v2/internal/clock/clock.go +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -39,16 +39,6 @@ type Clock interface { // Sleep sleeps for the provided duration d. // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. Sleep(d time.Duration) - // Tick returns the channel of a new Ticker. - // This method does not allow to free/GC the backing ticker. Use - // NewTicker from WithTicker instead. - Tick(d time.Duration) <-chan time.Time -} - -// WithTicker allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type WithTicker interface { - Clock // NewTicker returns a new Ticker. NewTicker(time.Duration) Ticker } @@ -66,7 +56,7 @@ type WithDelayedExecution interface { // WithTickerAndDelayedExecution allows for injecting fake or real clocks // into code that needs Ticker and AfterFunc functionality type WithTickerAndDelayedExecution interface { - WithTicker + Clock // AfterFunc executes f in its own goroutine after waiting // for d duration and returns a Timer whose channel can be // closed by calling Stop() on the Timer. 
@@ -79,7 +69,7 @@ type Ticker interface { Stop() } -var _ = WithTicker(RealClock{}) +var _ Clock = RealClock{} // RealClock really calls time.Now() type RealClock struct{} @@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer { } } -// Tick is the same as time.Tick(d) -// This method does not allow to free/GC the backing ticker. Use -// NewTicker instead. -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - // NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index bcdf5f8ee..d1a4751c9 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -172,73 +172,6 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { Formatter{}.KVListFormat(b, keysAndValues...) } -// KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case textWriter: - writeTextWriterValue(b, v) - case fmt.Stringer: - writeStringValue(b, StringerToString(v)) - case string: - writeStringValue(b, v) - case error: - writeStringValue(b, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, value) - default: - f.formatAny(b, value) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. 
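The "%+q" formatting described in the comment above can be seen in isolation; this standalone snippet shows both the readable and the escaped cases:

package main

import "fmt"

func main() {
	printable := []byte("ok")
	binary := []byte{0x00, 0xff, '\n'}

	// %+q keeps printable bytes readable and escapes the rest into an
	// ASCII-only quoted string, at the cost of single-line output.
	fmt.Printf("%+q\n", printable) // "ok"
	fmt.Printf("%+q\n", binary)    // "\x00\xff\n"
}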
- b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - f.formatAny(b, v) - } -} - func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } @@ -251,6 +184,10 @@ func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { b.WriteString(f.AnyToStringHook(v)) return } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { encoder := json.NewEncoder(b) l := b.Len() if err := encoder.Encode(v); err != nil { diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 000000000..d9c7d1546 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. + + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". 
The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		f.formatAny(b, v)
+	}
+}
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go
new file mode 100644
index 000000000..89acf9772
--- /dev/null
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go
@@ -0,0 +1,155 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serialize
+
+import (
+	"bytes"
+	"fmt"
+	"log/slog"
+	"strconv"
+
+	"github.com/go-logr/logr"
+)
+
+// KVFormat serializes one key/value pair into the provided buffer.
+// A space gets inserted before the pair.
+func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
+	// This is the version with slog support. Must be kept in sync with
+	// the version in keyvalues_no_slog.go.
+
+	b.WriteByte(' ')
+	// Keys are assumed to be well-formed according to
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+	// for the sake of performance. Keys with spaces,
+	// special characters, etc. will break parsing.
+	if sK, ok := k.(string); ok {
+		// Avoid one allocation when the key is a string, which
+		// normally it should be.
+		b.WriteString(sK)
+	} else {
+		b.WriteString(fmt.Sprintf("%s", k))
+	}
+
+	// The type checks are sorted so that more frequently used ones
+	// come first because that is then faster in the common
+	// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+	// than plain strings
+	// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+	//
+	// slog.LogValuer does not need to be handled here because the handler will
+	// already have resolved such special values to the final value for logging.
+	switch v := v.(type) {
+	case textWriter:
+		writeTextWriterValue(b, v)
+	case slog.Value:
+		// This must come before fmt.Stringer because slog.Value implements
+		// fmt.Stringer, but does not produce the output that we want.
+		b.WriteByte('=')
+		generateJSON(b, v)
+	case fmt.Stringer:
+		writeStringValue(b, StringerToString(v))
+	case string:
+		writeStringValue(b, v)
+	case error:
+		writeStringValue(b, ErrorToString(v))
+	case logr.Marshaler:
+		value := MarshalerToValue(v)
+		// A marshaler that returns a string is useful for
+		// delayed formatting of complex values. We treat this
+		// case like a normal string. This is useful for
+		// multi-line support.
+ // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. + generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 000000000..21f1697d0 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." + attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go similarity index 51% rename from vendor/k8s.io/kubernetes/pkg/apis/core/doc.go rename to vendor/k8s.io/klog/v2/k8s_references_slog.go index 6475fdab1..5522c84c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -1,5 +1,8 @@ +//go:build go1.21 +// +build go1.21 + /* -Copyright 2014 The Kubernetes Authors. +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +17,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} -// Package core contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. 
The first one is "v1". Those packages
-// describe how a particular version is serialized to storage/network.
-package core // import "k8s.io/kubernetes/pkg/apis/core"
+var _ slog.LogValuer = kobjSlice{}
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 152f8a6bd..72502db3a 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -415,7 +415,7 @@ func init() {
 	logging.stderrThreshold = severityValue{
 		Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
 	}
-	commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
+	commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)")
 	commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
 	commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")

@@ -518,9 +518,7 @@ type settings struct {
 func (s settings) deepCopy() settings {
 	// vmodule is a slice and would be shared, so we have to copy it.
 	filter := make([]modulePat, len(s.vmodule.filter))
-	for i := range s.vmodule.filter {
-		filter[i] = s.vmodule.filter[i]
-	}
+	copy(filter, s.vmodule.filter)
 	s.vmodule.filter = filter

 	if s.logger != nil {
@@ -657,16 +655,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 			}
 		}
 	}
-	return l.formatHeader(s, file, line), file, line
+	return l.formatHeader(s, file, line, timeNow()), file, line
 }

 // formatHeader formats a log header using the provided file name and line number.
-func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
+func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer {
 	buf := buffer.GetBuffer()
 	if l.skipHeaders {
 		return buf
 	}
-	now := timeNow()
 	buf.FormatHeader(s, file, line, now)
 	return buf
 }
@@ -676,6 +673,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil
 }

 func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+	if false {
+		_ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println
+	}
+
 	buf, file, line := l.header(s, depth)
 	// If a logger is set and doesn't support writing a formatted buffer,
 	// we clear the generated header as we rely on the backing
@@ -696,7 +697,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte
 }

 func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
+	if false {
+		_ = fmt.Sprint(args...) // cause vet to treat this function like fmt.Print
+	}
+
 	buf, file, line := l.header(s, depth)
+	l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...)
+}
+
+func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
 	// If a logger is set and doesn't support writing a formatted buffer,
 	// we clear the generated header as we rely on the backing
 	// logger implementation to print headers.
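The dead `if false` blocks added in this hunk and the next are there for tooling, as their comments say: go vet's printf checker can then classify these wrappers as print-like functions and flag mismatched format verbs at the call site, while the compiler discards the branch. A standalone sketch of the idiom (the function name is illustrative):

package main

import "fmt"

// The unreachable fmt.Sprintf forwards format and args, which is the
// hint vet's printf analysis keys on; logf("%d", "x") then warns.
func logf(format string, args ...interface{}) {
	if false {
		_ = fmt.Sprintf(format, args...)
	}
	fmt.Println("log:", fmt.Sprintf(format, args...))
}

func main() {
	logf("pid=%d", 42)
}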
@@ -719,6 +728,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt } func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -741,7 +754,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) + buf := l.formatHeader(s, file, line, timeNow()) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -759,7 +772,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. +// if logger is specified, will call logger.Error, otherwise output with logging module. func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -771,7 +784,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } -// if loggr is specified, will call loggr.Info, otherwise output with logging module. +// if logger is specified, will call logger.Info, otherwise output with logging module. func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -783,7 +796,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. +// printS is called from infoS and errorS if logger is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. @@ -796,7 +809,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. 
buffer.PutBuffer(b) } @@ -873,6 +886,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu if logger.writeKlogBuffer != nil { logger.writeKlogBuffer(data) } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { @@ -897,7 +913,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu l.exit(err) } } - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -907,20 +923,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { case severity.FatalLog: - l.file[severity.FatalLog].Write(data) + _, _ = l.file[severity.FatalLog].Write(data) fallthrough case severity.ErrorLog: - l.file[severity.ErrorLog].Write(data) + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough case severity.WarningLog: - l.file[severity.WarningLog].Write(data) + _, _ = l.file[severity.WarningLog].Write(data) fallthrough case severity.InfoLog: - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } } } @@ -946,7 +962,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() @@ -1102,7 +1118,7 @@ const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. type flushDaemon struct { mu sync.Mutex - clock clock.WithTicker + clock clock.Clock flush func() stopC chan struct{} stopDone chan struct{} @@ -1110,7 +1126,7 @@ type flushDaemon struct { // newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a // clock.RealClock is used. -func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { if tickClock == nil { tickClock = clock.RealClock{} } @@ -1201,8 +1217,8 @@ func (l *loggingT) flushAll() { for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + _ = file.Flush() // ignore error + _ = file.Sync() // ignore error } } if logging.loggerOptions.flush != nil { @@ -1281,9 +1297,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. 
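The cleanup just below replaces a manual suffix check with strings.TrimSuffix; the whole trim-then-split step looks like this on its own:

package main

import (
	"fmt"
	"strings"
)

func main() {
	file := "/a/b/c/d.go"
	file = strings.TrimSuffix(file, ".go") // "/a/b/c/d"
	if slash := strings.LastIndex(file, "/"); slash >= 0 {
		file = file[slash+1:] // "d"
	}
	fmt.Println(file)
}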
-	if strings.HasSuffix(file, ".go") {
-		file = file[:len(file)-3]
-	}
+	file = strings.TrimSuffix(file, ".go")
 	if slash := strings.LastIndex(file, "/"); slash >= 0 {
 		file = file[slash+1:]
 	}
diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go
index 1025d644f..8bee16204 100644
--- a/vendor/k8s.io/klog/v2/klog_file.go
+++ b/vendor/k8s.io/klog/v2/klog_file.go
@@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string,
 	f, err := openOrCreate(fname, startup)
 	if err == nil {
 		symlink := filepath.Join(dir, link)
-		os.Remove(symlink)        // ignore err
-		os.Symlink(name, symlink) // ignore err
+		_ = os.Remove(symlink)        // ignore err
+		_ = os.Symlink(name, symlink) // ignore err
 		return f, fname, nil
 	}
 	lastErr = err
diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go
index 15de00e21..efec96fd4 100644
--- a/vendor/k8s.io/klog/v2/klogr.go
+++ b/vendor/k8s.io/klog/v2/klogr.go
@@ -22,6 +22,11 @@ import (
 	"k8s.io/klog/v2/internal/serialize"
 )

+const (
+	// nameKey is used to log the `WithName` values as an additional attribute.
+	nameKey = "logger"
+)
+
 // NewKlogr returns a logger that is functionally identical to
 // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The
 // difference is that it uses a simpler implementation.
@@ -32,10 +37,15 @@ func NewKlogr() Logger {
 // klogger is a subset of klogr/klogr.go. It had to be copied to break an
 // import cycle (klogr wants to use klog, and klog wants to use klogr).
 type klogger struct {
-	level     int
 	callDepth int
-	prefix    string
-	values    []interface{}
+
+	// hasPrefix is true if the first entry in values is the special
+	// nameKey key/value. Such an entry gets added and later updated in
+	// WithName.
+	hasPrefix bool
+
+	values []interface{}
+	groups string
 }

 func (l *klogger) Init(info logr.RuntimeInfo) {
@@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) {

 func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
-	if l.prefix != "" {
-		msg = l.prefix + ": " + msg
-	}
 	// Skip this function.
 	VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
 }

 func (l *klogger) Enabled(level int) bool {
-	// Skip this function and logr.Logger.Info where Enabled is called.
-	return VDepth(l.callDepth+2, Level(level)).Enabled()
+	return VDepth(l.callDepth+1, Level(level)).Enabled()
 }

 func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
-	if l.prefix != "" {
-		msg = l.prefix + ": " + msg
-	}
 	ErrorSDepth(l.callDepth+1, err, msg, merged...)
 }

 // WithName returns a new logr.Logger with the specified name appended. klogr
-// uses '/' characters to separate name elements. Callers should not pass '/'
+// uses '.' characters to separate name elements. Callers should not pass '.'
 // in the provided name string, but this library does not actually enforce that.
 func (l klogger) WithName(name string) logr.LogSink {
-	if len(l.prefix) > 0 {
-		l.prefix = l.prefix + "/"
+	if l.hasPrefix {
+		// Copy slice and modify value. No length checks and type
+		// assertions are needed because hasPrefix is only true if the
+		// first two elements exist and are key/value strings.
+		v := make([]interface{}, 0, len(l.values))
+		v = append(v, l.values...)
+		prefix, _ := v[1].(string)
+		v[1] = prefix + "." + name
+		l.values = v
+	} else {
+		// Prepend new key/value pair.
+ v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true } - l.prefix += name return &l } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 000000000..f7bf74030 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr/slogr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. + buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) slogr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ slogr.SlogSink = &klogger{} diff --git a/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go b/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go deleted file mode 100644 index e01566925..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/builder3/util/util.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "reflect" - - "k8s.io/kube-openapi/pkg/schemamutation" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// wrapRefs wraps OpenAPI V3 Schema refs that contain sibling elements. -// AllOf is used to wrap the Ref to prevent references from having sibling elements -// Please see https://github.com/kubernetes/kubernetes/issues/106387#issuecomment-967640388 -func WrapRefs(schema *spec.Schema) *spec.Schema { - walker := schemamutation.Walker{ - SchemaCallback: func(schema *spec.Schema) *spec.Schema { - orig := schema - clone := func() { - if orig == schema { - schema = new(spec.Schema) - *schema = *orig - } - } - if schema.Ref.String() != "" && !reflect.DeepEqual(*schema, spec.Schema{SchemaProps: spec.SchemaProps{Ref: schema.Ref}}) { - clone() - refSchema := new(spec.Schema) - refSchema.Ref = schema.Ref - schema.Ref = spec.Ref{} - schema.AllOf = []spec.Schema{*refSchema} - } - return schema - }, - RefCallback: schemamutation.RefCallbackNoop, - } - return walker.WalkSchema(schema) -} diff --git a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go index 76415b783..a66fe8a09 100644 --- a/vendor/k8s.io/kube-openapi/pkg/cached/cache.go +++ b/vendor/k8s.io/kube-openapi/pkg/cached/cache.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package cache provides a cache mechanism based on etags to lazily +// Package cached provides a cache mechanism based on etags to lazily // build, and/or cache results from expensive operation such that those // operations are not repeated unnecessarily. The operations can be // created as a tree, and replaced dynamically as needed. @@ -25,16 +25,18 @@ limitations under the License. // // This package uses a source/transform/sink model of caches to build // the dependency tree, and can be used as follows: -// - [NewSource]: A source cache that recomputes the content every time. -// - [NewStaticSource]: A source cache that always produces the +// - [Func]: A source cache that recomputes the content every time. +// - [Once]: A source cache that always produces the // same content, it is only called once. -// - [NewTransformer]: A cache that transforms data from one format to +// - [Transform]: A cache that transforms data from one format to // another. It's only refreshed when the source changes. -// - [NewMerger]: A cache that aggregates multiple caches into one. +// - [Merge]: A cache that aggregates multiple caches in a map into one. // It's only refreshed when the source changes. -// - [Replaceable]: A cache adapter that can be atomically -// replaced with a new one, and saves the previous results in case an -// error pops-up. +// - [MergeList]: A cache that aggregates multiple caches in a list into one. +// It's only refreshed when the source changes. +// - [Atomic]: A cache adapter that atomically replaces the source with a new one. +// - [LastSuccess]: A cache adapter that caches the last successful and returns +// it if the next call fails. It extends [Atomic]. 
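A sketch of how the renamed primitives compose under the new tuple-returning Get. The values and etags below are illustrative, not taken from the patch; the import path is the package as vendored here:

package main

import (
	"fmt"
	"strings"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	// A source that recomputes on every Get; the etag versions the result.
	src := cached.Func[string](func() (string, string, error) {
		return "hello", "etag-1", nil
	})

	// Transform only reruns its function when the source etag changes.
	upper := cached.Transform[string](func(s, etag string, err error) (string, string, error) {
		if err != nil {
			return "", "", err
		}
		return strings.ToUpper(s), etag, nil
	}, src)

	value, etag, err := upper.Get()
	fmt.Println(value, etag, err) // HELLO etag-1 <nil>
}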
 //
 // # Etags
 //
@@ -54,61 +56,45 @@ import (
 	"sync/atomic"
 )

-// Result is the content returned from a call to a cache. It can either
-// be created with [NewResultOK] if the call was a success, or
-// [NewResultErr] if the call resulted in an error.
+// Value is wrapping a value behind a getter for lazy evaluation.
+type Value[T any] interface {
+	Get() (value T, etag string, err error)
+}
+
+// Result is wrapping T and error into a struct for cases where a tuple is more
+// convenient or necessary in Golang.
 type Result[T any] struct {
-	Data T
-	Etag string
-	Err  error
+	Value T
+	Etag  string
+	Err   error
 }

-// NewResultOK creates a new [Result] for a successful operation.
-func NewResultOK[T any](data T, etag string) Result[T] {
-	return Result[T]{
-		Data: data,
-		Etag: etag,
-	}
+func (r Result[T]) Get() (T, string, error) {
+	return r.Value, r.Etag, r.Err
 }

-// NewResultErr creates a new [Result] when an error has happened.
-func NewResultErr[T any](err error) Result[T] {
-	return Result[T]{
-		Err: err,
-	}
+// Func wraps a (thread-safe) function as a Value[T].
+func Func[T any](fn func() (T, string, error)) Value[T] {
+	return valueFunc[T](fn)
 }

-// Result can be treated as a [Data] if necessary.
-func (r Result[T]) Get() Result[T] {
-	return r
+type valueFunc[T any] func() (T, string, error)
+
+func (c valueFunc[T]) Get() (T, string, error) {
+	return c()
 }

-// Data is a cache that performs an action whose result data will be
-// cached. It also returns an "etag" identifier to version the cache, so
-// that the caller can know if they have the most recent version of the
-// cache (and can decide to cache some operation based on that).
-//
-// The [NewMerger] and [NewTransformer] automatically handle
-// that for you by checking if the etag is updated before calling the
-// merging or transforming function.
-type Data[T any] interface {
-	// Returns the cached data, as well as an "etag" to identify the
-	// version of the cache, or an error if something happened.
-	Get() Result[T]
+// Static returns constant values.
+func Static[T any](value T, etag string) Value[T] {
+	return Result[T]{Value: value, Etag: etag}
 }

-// NewMerger creates a new merge cache, a cache that merges the result
-// of other caches. The function only gets called if any of the
-// dependency has changed.
+// Merge merges a map of cached values. The merge function only gets called if
+// any of the dependencies has changed.
 //
 // If any of the dependency returned an error before, or any of the
 // dependency returned an error this time, or if the mergeFn failed
-// before, then the function is reran.
-//
-// The caches and results are mapped by K so that associated data can be
-// retrieved. The map of dependencies can not be modified after
-// creation, and a new merger should be created (and probably replaced
-// using a [Replaceable]).
+// before, then the function is run again.
 //
 // Note that this assumes there is no "partial" merge, the merge
 // function will remerge all the dependencies together everytime. Since
 // the list of dependencies is constant, there is no way to save some
 // partial merge information either.
 //
 // Also note that Golang map iteration is not stable. If the mergeFn
 // depends on the order iteration to be stable, it will need to
 // implement its own sorting or iteration order.
-func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Result[V], caches map[K]Data[T]) Data[V] { - listCaches := make([]Data[T], 0, len(caches)) - // maps from index to key +func Merge[K comparable, T, V any](mergeFn func(results map[K]Result[T]) (V, string, error), caches map[K]Value[T]) Value[V] { + list := make([]Value[T], 0, len(caches)) + + // map from index to key indexes := make(map[int]K, len(caches)) i := 0 for k := range caches { - listCaches = append(listCaches, caches[k]) + list = append(list, caches[k]) indexes[i] = k i++ } - return NewListMerger(func(results []Result[T]) Result[V] { + return MergeList(func(results []Result[T]) (V, string, error) { if len(results) != len(indexes) { panic(fmt.Errorf("invalid result length %d, expected %d", len(results), len(indexes))) } @@ -138,20 +125,11 @@ func NewMerger[K comparable, T, V any](mergeFn func(results map[K]Result[T]) Res m[indexes[i]] = results[i] } return mergeFn(m) - }, listCaches) -} - -type listMerger[T, V any] struct { - lock sync.Mutex - mergeFn func([]Result[T]) Result[V] - caches []Data[T] - cacheResults []Result[T] - result Result[V] + }, list) } -// NewListMerger creates a new merge cache that merges the results of -// other caches in list form. The function only gets called if any of -// the dependency has changed. +// MergeList merges a list of cached values. The function only gets called if +// any of the dependency has changed. // // The benefit of ListMerger over the basic Merger is that caches are // stored in an ordered list so the order of the cache will be @@ -165,31 +143,37 @@ type listMerger[T, V any] struct { // function will remerge all the dependencies together everytime. Since // the list of dependencies is constant, there is no way to save some // partial merge information either. 
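Before MergeList: the map-based Merge above can be exercised like this (keys, values, and the combined etag are illustrative):

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	sources := map[string]cached.Value[int]{
		"a": cached.Static(1, "etag-a"),
		"b": cached.Static(2, "etag-b"),
	}

	// The merge function reruns only when one of the source etags changes.
	sum := cached.Merge(func(results map[string]cached.Result[int]) (int, string, error) {
		total := 0
		for _, r := range results {
			if r.Err != nil {
				return 0, "", r.Err
			}
			total += r.Value
		}
		// A real combined etag would be derived from the (sorted)
		// input etags; a constant keeps the sketch short.
		return total, "etag-sum", nil
	}, sources)

	fmt.Println(sum.Get()) // 3 etag-sum <nil>
}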
-func NewListMerger[T, V any](mergeFn func(results []Result[T]) Result[V], caches []Data[T]) Data[V] { +func MergeList[T, V any](mergeFn func(results []Result[T]) (V, string, error), delegates []Value[T]) Value[V] { return &listMerger[T, V]{ - mergeFn: mergeFn, - caches: caches, + mergeFn: mergeFn, + delegates: delegates, } } +type listMerger[T, V any] struct { + lock sync.Mutex + mergeFn func([]Result[T]) (V, string, error) + delegates []Value[T] + cache []Result[T] + result Result[V] +} + func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { - cacheResults := make([]Result[T], len(c.caches)) + cacheResults := make([]Result[T], len(c.delegates)) ch := make(chan struct { int Result[T] - }, len(c.caches)) - for i := range c.caches { + }, len(c.delegates)) + for i := range c.delegates { go func(index int) { + value, etag, err := c.delegates[index].Get() ch <- struct { int Result[T] - }{ - index, - c.caches[index].Get(), - } + }{index, Result[T]{Value: value, Etag: etag, Err: err}} }(i) } - for i := 0; i < len(c.caches); i++ { + for i := 0; i < len(c.delegates); i++ { res := <-ch cacheResults[res.int] = res.Result } @@ -197,16 +181,16 @@ func (c *listMerger[T, V]) prepareResultsLocked() []Result[T] { } func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { - if c.cacheResults == nil { + if c.cache == nil { return true } if c.result.Err != nil { return true } - if len(results) != len(c.cacheResults) { - panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cacheResults))) + if len(results) != len(c.cache) { + panic(fmt.Errorf("invalid number of results: %v (expected %v)", len(results), len(c.cache))) } - for i, oldResult := range c.cacheResults { + for i, oldResult := range c.cache { newResult := results[i] if newResult.Etag != oldResult.Etag || newResult.Err != nil || oldResult.Err != nil { return true @@ -215,98 +199,92 @@ func (c *listMerger[T, V]) needsRunningLocked(results []Result[T]) bool { return false } -func (c *listMerger[T, V]) Get() Result[V] { +func (c *listMerger[T, V]) Get() (V, string, error) { c.lock.Lock() defer c.lock.Unlock() cacheResults := c.prepareResultsLocked() if c.needsRunningLocked(cacheResults) { - c.cacheResults = cacheResults - c.result = c.mergeFn(c.cacheResults) + c.cache = cacheResults + c.result.Value, c.result.Etag, c.result.Err = c.mergeFn(c.cache) } - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// NewTransformer creates a new cache that transforms the result of -// another cache. The transformFn will only be called if the source -// cache has updated the output, otherwise, the cached result will be -// returned. +// Transform the result of another cached value. The transformFn will only be called +// if the source has updated, otherwise, the result will be returned. // // If the dependency returned an error before, or it returns an error // this time, or if the transformerFn failed before, the function is // reran. 
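The goroutine fan-out in prepareResultsLocked above is a general shape worth noting: one goroutine per input, results reassembled by index through a buffered channel. Distilled into a standalone sketch:

package main

import "fmt"

// fanOut runs f on every input concurrently and keeps the output
// aligned with the input order, as listMerger does for its delegates.
func fanOut(inputs []int, f func(int) int) []int {
	out := make([]int, len(inputs))
	ch := make(chan struct{ i, v int }, len(inputs))
	for i := range inputs {
		go func(i int) {
			ch <- struct{ i, v int }{i, f(inputs[i])}
		}(i)
	}
	for range inputs {
		r := <-ch
		out[r.i] = r.v
	}
	return out
}

func main() {
	fmt.Println(fanOut([]int{1, 2, 3}, func(x int) int { return x * x })) // [1 4 9]
}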
-func NewTransformer[T, V any](transformerFn func(Result[T]) Result[V], source Data[T]) Data[V] { - return NewListMerger(func(caches []Result[T]) Result[V] { - if len(caches) != 1 { - panic(fmt.Errorf("invalid cache for transformer cache: %v", caches)) +func Transform[T, V any](transformerFn func(T, string, error) (V, string, error), source Value[T]) Value[V] { + return MergeList(func(delegates []Result[T]) (V, string, error) { + if len(delegates) != 1 { + panic(fmt.Errorf("invalid cache for transformer cache: %v", delegates)) } - return transformerFn(caches[0]) - }, []Data[T]{source}) -} - -// NewSource creates a new cache that generates some data. This -// will always be called since we don't know the origin of the data and -// if it needs to be updated or not. sourceFn MUST be thread-safe. -func NewSource[T any](sourceFn func() Result[T]) Data[T] { - c := source[T](sourceFn) - return &c + return transformerFn(delegates[0].Value, delegates[0].Etag, delegates[0].Err) + }, []Value[T]{source}) } -type source[T any] func() Result[T] - -func (c *source[T]) Get() Result[T] { - return (*c)() -} - -// NewStaticSource creates a new cache that always generates the -// same data. This will only be called once (lazily). -func NewStaticSource[T any](staticFn func() Result[T]) Data[T] { - return &static[T]{ - fn: staticFn, +// Once calls Value[T].Get() lazily and only once, even in case of an error result. +func Once[T any](d Value[T]) Value[T] { + return &once[T]{ + data: d, } } -type static[T any] struct { +type once[T any] struct { once sync.Once - fn func() Result[T] + data Value[T] result Result[T] } -func (c *static[T]) Get() Result[T] { +func (c *once[T]) Get() (T, string, error) { c.once.Do(func() { - c.result = c.fn() + c.result.Value, c.result.Etag, c.result.Err = c.data.Get() }) - return c.result + return c.result.Value, c.result.Etag, c.result.Err } -// Replaceable is a cache that carries the result even when the cache is -// replaced. This is the type that should typically be stored in -// structs. -type Replaceable[T any] struct { - cache atomic.Pointer[Data[T]] - result atomic.Pointer[Result[T]] +// Replaceable extends the Value[T] interface with the ability to change the +// underlying Value[T] after construction. +type Replaceable[T any] interface { + Value[T] + Store(Value[T]) } -// Get retrieves the data from the underlying source. [Replaceable] -// implements the [Data] interface itself. This is a pass-through -// that calls the most recent underlying cache. If the cache fails but -// previously had returned a success, that success will be returned -// instead. If the cache fails but we never returned a success, that -// failure is returned. -func (c *Replaceable[T]) Get() Result[T] { - result := (*c.cache.Load()).Get() - - for { - cResult := c.result.Load() - if result.Err != nil && cResult != nil && cResult.Err == nil { - return *cResult - } - if c.result.CompareAndSwap(cResult, &result) { - return result +// Atomic wraps a Value[T] as an atomic value that can be replaced. It implements +// Replaceable[T]. +type Atomic[T any] struct { + value atomic.Pointer[Value[T]] +} + +var _ Replaceable[[]byte] = &Atomic[[]byte]{} + +func (x *Atomic[T]) Store(val Value[T]) { x.value.Store(&val) } +func (x *Atomic[T]) Get() (T, string, error) { return (*x.value.Load()).Get() } + +// LastSuccess calls Value[T].Get(), but hides errors by returning the last +// success if there has been any. 
+type LastSuccess[T any] struct { + Atomic[T] + success atomic.Pointer[Result[T]] +} + +var _ Replaceable[[]byte] = &LastSuccess[[]byte]{} + +func (c *LastSuccess[T]) Get() (T, string, error) { + success := c.success.Load() + value, etag, err := c.Atomic.Get() + if err == nil { + if success == nil { + c.success.CompareAndSwap(nil, &Result[T]{Value: value, Etag: etag, Err: err}) } + return value, etag, err + } + + if success != nil { + return success.Value, success.Etag, success.Err } -} -// Replace changes the cache. -func (c *Replaceable[T]) Replace(cache Data[T]) { - c.cache.Swap(&cache) + return value, etag, err } diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 1a6c12e17..2e15e163c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -22,7 +22,6 @@ import ( "github.com/emicklei/go-restful/v3" - "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -172,43 +171,6 @@ type OpenAPIV3Config struct { DefaultSecurity []map[string][]string } -// ConvertConfigToV3 converts a Config object to an OpenAPIV3Config object -func ConvertConfigToV3(config *Config) *OpenAPIV3Config { - if config == nil { - return nil - } - - v3Config := &OpenAPIV3Config{ - Info: config.Info, - IgnorePrefixes: config.IgnorePrefixes, - GetDefinitions: config.GetDefinitions, - GetOperationIDAndTags: config.GetOperationIDAndTags, - GetOperationIDAndTagsFromRoute: config.GetOperationIDAndTagsFromRoute, - GetDefinitionName: config.GetDefinitionName, - Definitions: config.Definitions, - SecuritySchemes: make(spec3.SecuritySchemes), - DefaultSecurity: config.DefaultSecurity, - DefaultResponse: openapiconv.ConvertResponse(config.DefaultResponse, []string{"application/json"}), - - CommonResponses: make(map[int]*spec3.Response), - ResponseDefinitions: make(map[string]*spec3.Response), - } - - if config.SecurityDefinitions != nil { - for s, securityScheme := range *config.SecurityDefinitions { - v3Config.SecuritySchemes[s] = openapiconv.ConvertSecurityScheme(securityScheme) - } - } - for k, commonResponse := range config.CommonResponses { - v3Config.CommonResponses[k] = openapiconv.ConvertResponse(&commonResponse, []string{"application/json"}) - } - - for k, responseDefinition := range config.ResponseDefinitions { - v3Config.ResponseDefinitions[k] = openapiconv.ConvertResponse(&responseDefinition, []string{"application/json"}) - } - return v3Config -} - type typeInfo struct { name string format string diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index 2263e2f32..fc4563488 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -33,6 +33,7 @@ import ( openapi_v3 "github.com/google/gnostic-models/openapiv3" "github.com/google/uuid" "github.com/munnerz/goautoneg" + "k8s.io/klog/v2" "k8s.io/kube-openapi/pkg/cached" "k8s.io/kube-openapi/pkg/common" @@ -73,38 +74,38 @@ type timedSpec struct { // This type is protected by the lock on OpenAPIService. 
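The openAPIV3Group below keeps its spec in a cached.LastSuccess, whose contract is: serve the newest value while it works, and fall back to the last good one when it stops working. In isolation (values illustrative):

package main

import (
	"errors"
	"fmt"

	"k8s.io/kube-openapi/pkg/cached"
)

func main() {
	var c cached.LastSuccess[string]

	c.Store(cached.Static("v1", "etag-1"))
	fmt.Println(c.Get()) // v1 etag-1 <nil>

	// After a failing source is stored, Get keeps returning the last
	// successful result instead of surfacing the error.
	c.Store(cached.Func[string](func() (string, string, error) {
		return "", "", errors.New("boom")
	}))
	fmt.Println(c.Get()) // v1 etag-1 <nil>
}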
type openAPIV3Group struct { - specCache cached.Replaceable[*spec3.OpenAPI] - pbCache cached.Data[timedSpec] - jsonCache cached.Data[timedSpec] + specCache cached.LastSuccess[*spec3.OpenAPI] + pbCache cached.Value[timedSpec] + jsonCache cached.Value[timedSpec] } func newOpenAPIV3Group() *openAPIV3Group { o := &openAPIV3Group{} - o.jsonCache = cached.NewTransformer[*spec3.OpenAPI](func(result cached.Result[*spec3.OpenAPI]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.jsonCache = cached.Transform[*spec3.OpenAPI](func(spec *spec3.OpenAPI, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - json, err := json.Marshal(result.Data) + json, err := json.Marshal(spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: json, lastModified: time.Now()}, computeETag(json)) + return timedSpec{spec: json, lastModified: time.Now()}, computeETag(json), nil }, &o.specCache) - o.pbCache = cached.NewTransformer(func(result cached.Result[timedSpec]) cached.Result[timedSpec] { - if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + o.pbCache = cached.Transform(func(ts timedSpec, etag string, err error) (timedSpec, string, error) { + if err != nil { + return timedSpec{}, "", err } - proto, err := ToV3ProtoBinary(result.Data.spec) + proto, err := ToV3ProtoBinary(ts.spec) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: proto, lastModified: result.Data.lastModified}, result.Etag) + return timedSpec{spec: proto, lastModified: ts.lastModified}, etag, nil }, o.jsonCache) return o } -func (o *openAPIV3Group) UpdateSpec(openapi cached.Data[*spec3.OpenAPI]) { - o.specCache.Replace(openapi) +func (o *openAPIV3Group) UpdateSpec(openapi cached.Value[*spec3.OpenAPI]) { + o.specCache.Store(openapi) } // OpenAPIService is the service responsible for serving OpenAPI spec. It has @@ -114,7 +115,7 @@ type OpenAPIService struct { mutex sync.Mutex v3Schema map[string]*openAPIV3Group - discoveryCache cached.Replaceable[timedSpec] + discoveryCache cached.LastSuccess[timedSpec] } func computeETag(data []byte) string { @@ -137,20 +138,20 @@ func NewOpenAPIService() *OpenAPIService { o := &OpenAPIService{} o.v3Schema = make(map[string]*openAPIV3Group) // We're not locked because we haven't shared the structure yet. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) return o } -func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { - caches := make(map[string]cached.Data[timedSpec], len(o.v3Schema)) +func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Value[timedSpec] { + caches := make(map[string]cached.Value[timedSpec], len(o.v3Schema)) for gvName, group := range o.v3Schema { caches[gvName] = group.jsonCache } - return cached.NewMerger(func(results map[string]cached.Result[timedSpec]) cached.Result[timedSpec] { + return cached.Merge(func(results map[string]cached.Result[timedSpec]) (timedSpec, string, error) { discovery := &OpenAPIV3Discovery{Paths: make(map[string]OpenAPIV3DiscoveryGroupVersion)} for gvName, result := range results { if result.Err != nil { - return cached.NewResultErr[timedSpec](result.Err) + return timedSpec{}, "", result.Err } discovery.Paths[gvName] = OpenAPIV3DiscoveryGroupVersion{ ServerRelativeURL: constructServerRelativeURL(gvName, result.Etag), @@ -158,9 +159,9 @@ func (o *OpenAPIService) buildDiscoveryCacheLocked() cached.Data[timedSpec] { } j, err := json.Marshal(discovery) if err != nil { - return cached.NewResultErr[timedSpec](err) + return timedSpec{}, "", err } - return cached.NewResultOK(timedSpec{spec: j, lastModified: time.Now()}, computeETag(j)) + return timedSpec{spec: j, lastModified: time.Now()}, computeETag(j), nil }, caches) } @@ -171,32 +172,32 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by if !ok { return nil, "", time.Now(), fmt.Errorf("Cannot find CRD group %s", group) } - result := cached.Result[timedSpec]{} switch getType { case subTypeJSON: - result = v.jsonCache.Get() + ts, etag, err := v.jsonCache.Get() + return ts.spec, etag, ts.lastModified, err case subTypeProtobuf, subTypeProtobufDeprecated: - result = v.pbCache.Get() + ts, etag, err := v.pbCache.Get() + return ts.spec, etag, ts.lastModified, err default: return nil, "", time.Now(), fmt.Errorf("Invalid accept clause %s", getType) } - return result.Data.spec, result.Etag, result.Data.lastModified, result.Err } // UpdateGroupVersionLazy adds or updates an existing group with the new cached. -func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Data[*spec3.OpenAPI]) { +func (o *OpenAPIService) UpdateGroupVersionLazy(group string, openapi cached.Value[*spec3.OpenAPI]) { o.mutex.Lock() defer o.mutex.Unlock() if _, ok := o.v3Schema[group]; !ok { o.v3Schema[group] = newOpenAPIV3Group() // Since there is a new item, we need to re-build the cache map. - o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } o.v3Schema[group].UpdateSpec(openapi) } func (o *OpenAPIService) UpdateGroupVersion(group string, openapi *spec3.OpenAPI) { - o.UpdateGroupVersionLazy(group, cached.NewResultOK(openapi, uuid.New().String())) + o.UpdateGroupVersionLazy(group, cached.Static(openapi, uuid.New().String())) } func (o *OpenAPIService) DeleteGroupVersion(group string) { @@ -204,19 +205,19 @@ func (o *OpenAPIService) DeleteGroupVersion(group string) { defer o.mutex.Unlock() delete(o.v3Schema, group) // Rebuild the merge cache map since the items have changed. 
- o.discoveryCache.Replace(o.buildDiscoveryCacheLocked()) + o.discoveryCache.Store(o.buildDiscoveryCacheLocked()) } func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { - result := o.discoveryCache.Get() - if result.Err != nil { - klog.Errorf("Error serving discovery: %s", result.Err) + ts, etag, err := o.discoveryCache.Get() + if err != nil { + klog.Errorf("Error serving discovery: %s", err) w.WriteHeader(http.StatusInternalServerError) return } - w.Header().Set("Etag", strconv.Quote(result.Etag)) + w.Header().Set("Etag", strconv.Quote(etag)) w.Header().Set("Content-Type", "application/json") - http.ServeContent(w, r, "/openapi/v3", result.Data.lastModified, bytes.NewReader(result.Data.spec)) + http.ServeContent(w, r, "/openapi/v3", ts.lastModified, bytes.NewReader(ts.spec)) } func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index bef603782..da5485f6a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -22,3 +22,4 @@ var UseOptimizedJSONUnmarshalingV3 bool = true // Used by tests to selectively disable experimental JSON marshaler var UseOptimizedJSONMarshaling bool = true +var UseOptimizedJSONMarshalingV3 bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go b/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go deleted file mode 100644 index e993fe23d..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/openapiconv/convert.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openapiconv - -import ( - "strings" - - klog "k8s.io/klog/v2" - builderutil "k8s.io/kube-openapi/pkg/builder3/util" - "k8s.io/kube-openapi/pkg/spec3" - "k8s.io/kube-openapi/pkg/validation/spec" -) - -var OpenAPIV2DefPrefix = "#/definitions/" -var OpenAPIV3DefPrefix = "#/components/schemas/" - -// ConvertV2ToV3 converts an OpenAPI V2 object into V3. -// Certain references may be shared between the V2 and V3 objects in the conversion. 
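
HandleDiscovery above shows the serving side of the cache: quote the etag for the Etag header and hand the bytes to http.ServeContent, which then answers If-None-Match and If-Modified-Since revalidation on its own. A sketch of that pattern with illustrative names (the deleted openapiconv converter continues below):

package sketch

import (
	"bytes"
	"net/http"
	"strconv"
	"time"
)

// serveCached is an illustrative stand-in for HandleDiscovery's tail: with the
// Etag header set, http.ServeContent replies 304 Not Modified to a matching
// If-None-Match (or a satisfied If-Modified-Since) instead of sending the body.
func serveCached(w http.ResponseWriter, r *http.Request, spec []byte, etag string, lastModified time.Time) {
	w.Header().Set("Etag", strconv.Quote(etag))
	w.Header().Set("Content-Type", "application/json")
	http.ServeContent(w, r, "/openapi/v3", lastModified, bytes.NewReader(spec))
}
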
-func ConvertV2ToV3(v2Spec *spec.Swagger) *spec3.OpenAPI { - v3Spec := &spec3.OpenAPI{ - Version: "3.0.0", - Info: v2Spec.Info, - ExternalDocs: ConvertExternalDocumentation(v2Spec.ExternalDocs), - Paths: ConvertPaths(v2Spec.Paths), - Components: ConvertComponents(v2Spec.SecurityDefinitions, v2Spec.Definitions, v2Spec.Responses, v2Spec.Produces), - } - - return v3Spec -} - -func ConvertExternalDocumentation(v2ED *spec.ExternalDocumentation) *spec3.ExternalDocumentation { - if v2ED == nil { - return nil - } - return &spec3.ExternalDocumentation{ - ExternalDocumentationProps: spec3.ExternalDocumentationProps{ - Description: v2ED.Description, - URL: v2ED.URL, - }, - } -} - -func ConvertComponents(v2SecurityDefinitions spec.SecurityDefinitions, v2Definitions spec.Definitions, v2Responses map[string]spec.Response, produces []string) *spec3.Components { - components := &spec3.Components{} - - if v2Definitions != nil { - components.Schemas = make(map[string]*spec.Schema) - } - for s, schema := range v2Definitions { - components.Schemas[s] = ConvertSchema(&schema) - } - if v2SecurityDefinitions != nil { - components.SecuritySchemes = make(spec3.SecuritySchemes) - } - for s, securityScheme := range v2SecurityDefinitions { - components.SecuritySchemes[s] = ConvertSecurityScheme(securityScheme) - } - if v2Responses != nil { - components.Responses = make(map[string]*spec3.Response) - } - for r, response := range v2Responses { - components.Responses[r] = ConvertResponse(&response, produces) - } - - return components -} - -func ConvertSchema(v2Schema *spec.Schema) *spec.Schema { - if v2Schema == nil { - return nil - } - v3Schema := spec.Schema{ - VendorExtensible: v2Schema.VendorExtensible, - SchemaProps: v2Schema.SchemaProps, - SwaggerSchemaProps: v2Schema.SwaggerSchemaProps, - ExtraProps: v2Schema.ExtraProps, - } - - if refString := v2Schema.Ref.String(); refString != "" { - if idx := strings.Index(refString, OpenAPIV2DefPrefix); idx != -1 { - v3Schema.Ref = spec.MustCreateRef(OpenAPIV3DefPrefix + refString[idx+len(OpenAPIV2DefPrefix):]) - } else { - klog.Errorf("Error: Swagger V2 Ref %s does not contain #/definitions\n", refString) - } - } - - if v2Schema.Properties != nil { - v3Schema.Properties = make(map[string]spec.Schema) - for key, property := range v2Schema.Properties { - v3Schema.Properties[key] = *ConvertSchema(&property) - } - } - if v2Schema.Items != nil { - v3Schema.Items = &spec.SchemaOrArray{ - Schema: ConvertSchema(v2Schema.Items.Schema), - Schemas: ConvertSchemaList(v2Schema.Items.Schemas), - } - } - - if v2Schema.AdditionalProperties != nil { - v3Schema.AdditionalProperties = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalProperties.Schema), - Allows: v2Schema.AdditionalProperties.Allows, - } - } - if v2Schema.AdditionalItems != nil { - v3Schema.AdditionalItems = &spec.SchemaOrBool{ - Schema: ConvertSchema(v2Schema.AdditionalItems.Schema), - Allows: v2Schema.AdditionalItems.Allows, - } - } - - return builderutil.WrapRefs(&v3Schema) -} - -func ConvertSchemaList(v2SchemaList []spec.Schema) []spec.Schema { - if v2SchemaList == nil { - return nil - } - v3SchemaList := []spec.Schema{} - for _, s := range v2SchemaList { - v3SchemaList = append(v3SchemaList, *ConvertSchema(&s)) - } - return v3SchemaList -} - -func ConvertSecurityScheme(v2securityScheme *spec.SecurityScheme) *spec3.SecurityScheme { - if v2securityScheme == nil { - return nil - } - securityScheme := &spec3.SecurityScheme{ - VendorExtensible: v2securityScheme.VendorExtensible, - SecuritySchemeProps: 
spec3.SecuritySchemeProps{ - Description: v2securityScheme.Description, - Type: v2securityScheme.Type, - Name: v2securityScheme.Name, - In: v2securityScheme.In, - }, - } - - if v2securityScheme.Flow != "" { - securityScheme.Flows = make(map[string]*spec3.OAuthFlow) - securityScheme.Flows[v2securityScheme.Flow] = &spec3.OAuthFlow{ - OAuthFlowProps: spec3.OAuthFlowProps{ - AuthorizationUrl: v2securityScheme.AuthorizationURL, - TokenUrl: v2securityScheme.TokenURL, - Scopes: v2securityScheme.Scopes, - }, - } - } - return securityScheme -} - -func ConvertPaths(v2Paths *spec.Paths) *spec3.Paths { - if v2Paths == nil { - return nil - } - paths := &spec3.Paths{ - VendorExtensible: v2Paths.VendorExtensible, - } - - if v2Paths.Paths != nil { - paths.Paths = make(map[string]*spec3.Path) - } - for k, v := range v2Paths.Paths { - paths.Paths[k] = ConvertPathItem(v) - } - return paths -} - -func ConvertPathItem(v2pathItem spec.PathItem) *spec3.Path { - path := &spec3.Path{ - Refable: v2pathItem.Refable, - PathProps: spec3.PathProps{ - Get: ConvertOperation(v2pathItem.Get), - Put: ConvertOperation(v2pathItem.Put), - Post: ConvertOperation(v2pathItem.Post), - Delete: ConvertOperation(v2pathItem.Delete), - Options: ConvertOperation(v2pathItem.Options), - Head: ConvertOperation(v2pathItem.Head), - Patch: ConvertOperation(v2pathItem.Patch), - }, - VendorExtensible: v2pathItem.VendorExtensible, - } - for _, param := range v2pathItem.Parameters { - path.Parameters = append(path.Parameters, ConvertParameter(param)) - } - return path -} - -func ConvertOperation(v2Operation *spec.Operation) *spec3.Operation { - if v2Operation == nil { - return nil - } - operation := &spec3.Operation{ - VendorExtensible: v2Operation.VendorExtensible, - OperationProps: spec3.OperationProps{ - Description: v2Operation.Description, - ExternalDocs: ConvertExternalDocumentation(v2Operation.OperationProps.ExternalDocs), - Tags: v2Operation.Tags, - Summary: v2Operation.Summary, - Deprecated: v2Operation.Deprecated, - OperationId: v2Operation.ID, - }, - } - - for _, param := range v2Operation.Parameters { - if param.ParamProps.Name == "body" && param.ParamProps.Schema != nil { - operation.OperationProps.RequestBody = &spec3.RequestBody{ - RequestBodyProps: spec3.RequestBodyProps{}, - } - if v2Operation.Consumes != nil { - operation.RequestBody.Content = make(map[string]*spec3.MediaType) - } - for _, consumer := range v2Operation.Consumes { - operation.RequestBody.Content[consumer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(param.ParamProps.Schema), - }, - } - } - } else { - operation.Parameters = append(operation.Parameters, ConvertParameter(param)) - } - } - - operation.Responses = &spec3.Responses{ResponsesProps: spec3.ResponsesProps{ - Default: ConvertResponse(v2Operation.Responses.Default, v2Operation.Produces), - }, - VendorExtensible: v2Operation.Responses.VendorExtensible, - } - - if v2Operation.Responses.StatusCodeResponses != nil { - operation.Responses.StatusCodeResponses = make(map[int]*spec3.Response) - } - for k, v := range v2Operation.Responses.StatusCodeResponses { - operation.Responses.StatusCodeResponses[k] = ConvertResponse(&v, v2Operation.Produces) - } - return operation -} - -func ConvertResponse(v2Response *spec.Response, produces []string) *spec3.Response { - if v2Response == nil { - return nil - } - response := &spec3.Response{ - Refable: ConvertRefableResponse(v2Response.Refable), - VendorExtensible: v2Response.VendorExtensible, - ResponseProps: spec3.ResponseProps{ - 
Description: v2Response.Description, - }, - } - - if v2Response.Schema != nil { - if produces != nil { - response.Content = make(map[string]*spec3.MediaType) - } - for _, producer := range produces { - response.ResponseProps.Content[producer] = &spec3.MediaType{ - MediaTypeProps: spec3.MediaTypeProps{ - Schema: ConvertSchema(v2Response.Schema), - }, - } - } - } - return response -} - -func ConvertParameter(v2Param spec.Parameter) *spec3.Parameter { - param := &spec3.Parameter{ - Refable: ConvertRefableParameter(v2Param.Refable), - VendorExtensible: v2Param.VendorExtensible, - ParameterProps: spec3.ParameterProps{ - Name: v2Param.Name, - Description: v2Param.Description, - In: v2Param.In, - Required: v2Param.Required, - Schema: ConvertSchema(v2Param.Schema), - AllowEmptyValue: v2Param.AllowEmptyValue, - }, - } - // Convert SimpleSchema into Schema - if param.Schema == nil { - param.Schema = &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{v2Param.Type}, - Format: v2Param.Format, - UniqueItems: v2Param.UniqueItems, - }, - } - } - - return param -} - -func ConvertRefableParameter(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/parameters/", "#/components/parameters/", 1))} - } - return refable -} - -func ConvertRefableResponse(refable spec.Refable) spec.Refable { - if refable.Ref.String() != "" { - return spec.Refable{Ref: spec.MustCreateRef(strings.Replace(refable.Ref.String(), "#/responses/", "#/components/responses/", 1))} - } - return refable -} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go b/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go deleted file mode 100644 index 3fac658e3..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/schemamutation/walker.go +++ /dev/null @@ -1,519 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package schemamutation - -import ( - "k8s.io/kube-openapi/pkg/validation/spec" -) - -// Walker runs callback functions on all references of an OpenAPI spec, -// replacing the values when visiting corresponding types. -type Walker struct { - // SchemaCallback will be called on each schema, taking the original schema, - // and before any other callbacks of the Walker. - // If the schema needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. - SchemaCallback func(schema *spec.Schema) *spec.Schema - - // RefCallback will be called on each ref. - // If the ref needs to be mutated, DO NOT mutate it in-place, - // always create a copy, mutate, and return it. 
- RefCallback func(ref *spec.Ref) *spec.Ref -} - -type SchemaCallbackFunc func(schema *spec.Schema) *spec.Schema -type RefCallbackFunc func(ref *spec.Ref) *spec.Ref - -var SchemaCallBackNoop SchemaCallbackFunc = func(schema *spec.Schema) *spec.Schema { - return schema -} -var RefCallbackNoop RefCallbackFunc = func(ref *spec.Ref) *spec.Ref { - return ref -} - -// ReplaceReferences rewrites the references without mutating the input. -// The output might share data with the input. -func ReplaceReferences(walkRef func(ref *spec.Ref) *spec.Ref, sp *spec.Swagger) *spec.Swagger { - walker := &Walker{RefCallback: walkRef, SchemaCallback: SchemaCallBackNoop} - return walker.WalkRoot(sp) -} - -func (w *Walker) WalkSchema(schema *spec.Schema) *spec.Schema { - if schema == nil { - return nil - } - - orig := schema - clone := func() { - if orig == schema { - schema = &spec.Schema{} - *schema = *orig - } - } - - // Always run callback on the whole schema first - // so that SchemaCallback can take the original schema as input. - schema = w.SchemaCallback(schema) - - if r := w.RefCallback(&schema.Ref); r != &schema.Ref { - clone() - schema.Ref = *r - } - - definitionsCloned := false - for k, v := range schema.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - schema.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - schema.Definitions[k2] = v2 - } - } - schema.Definitions[k] = *s - } - } - - propertiesCloned := false - for k, v := range schema.Properties { - if s := w.WalkSchema(&v); s != &v { - if !propertiesCloned { - propertiesCloned = true - clone() - schema.Properties = make(map[string]spec.Schema, len(orig.Properties)) - for k2, v2 := range orig.Properties { - schema.Properties[k2] = v2 - } - } - schema.Properties[k] = *s - } - } - - patternPropertiesCloned := false - for k, v := range schema.PatternProperties { - if s := w.WalkSchema(&v); s != &v { - if !patternPropertiesCloned { - patternPropertiesCloned = true - clone() - schema.PatternProperties = make(map[string]spec.Schema, len(orig.PatternProperties)) - for k2, v2 := range orig.PatternProperties { - schema.PatternProperties[k2] = v2 - } - } - schema.PatternProperties[k] = *s - } - } - - allOfCloned := false - for i := range schema.AllOf { - if s := w.WalkSchema(&schema.AllOf[i]); s != &schema.AllOf[i] { - if !allOfCloned { - allOfCloned = true - clone() - schema.AllOf = make([]spec.Schema, len(orig.AllOf)) - copy(schema.AllOf, orig.AllOf) - } - schema.AllOf[i] = *s - } - } - - anyOfCloned := false - for i := range schema.AnyOf { - if s := w.WalkSchema(&schema.AnyOf[i]); s != &schema.AnyOf[i] { - if !anyOfCloned { - anyOfCloned = true - clone() - schema.AnyOf = make([]spec.Schema, len(orig.AnyOf)) - copy(schema.AnyOf, orig.AnyOf) - } - schema.AnyOf[i] = *s - } - } - - oneOfCloned := false - for i := range schema.OneOf { - if s := w.WalkSchema(&schema.OneOf[i]); s != &schema.OneOf[i] { - if !oneOfCloned { - oneOfCloned = true - clone() - schema.OneOf = make([]spec.Schema, len(orig.OneOf)) - copy(schema.OneOf, orig.OneOf) - } - schema.OneOf[i] = *s - } - } - - if schema.Not != nil { - if s := w.WalkSchema(schema.Not); s != schema.Not { - clone() - schema.Not = s - } - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - if s := w.WalkSchema(schema.AdditionalProperties.Schema); s != schema.AdditionalProperties.Schema { - clone() - schema.AdditionalProperties = &spec.SchemaOrBool{Schema: s, 
Allows: schema.AdditionalProperties.Allows} - } - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - if s := w.WalkSchema(schema.AdditionalItems.Schema); s != schema.AdditionalItems.Schema { - clone() - schema.AdditionalItems = &spec.SchemaOrBool{Schema: s, Allows: schema.AdditionalItems.Allows} - } - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - if s := w.WalkSchema(schema.Items.Schema); s != schema.Items.Schema { - clone() - schema.Items = &spec.SchemaOrArray{Schema: s} - } - } else { - itemsCloned := false - for i := range schema.Items.Schemas { - if s := w.WalkSchema(&schema.Items.Schemas[i]); s != &schema.Items.Schemas[i] { - if !itemsCloned { - clone() - schema.Items = &spec.SchemaOrArray{ - Schemas: make([]spec.Schema, len(orig.Items.Schemas)), - } - itemsCloned = true - copy(schema.Items.Schemas, orig.Items.Schemas) - } - schema.Items.Schemas[i] = *s - } - } - } - } - - return schema -} - -func (w *Walker) walkParameter(param *spec.Parameter) *spec.Parameter { - if param == nil { - return nil - } - - orig := param - cloned := false - clone := func() { - if !cloned { - cloned = true - param = &spec.Parameter{} - *param = *orig - } - } - - if r := w.RefCallback(¶m.Ref); r != ¶m.Ref { - clone() - param.Ref = *r - } - if s := w.WalkSchema(param.Schema); s != param.Schema { - clone() - param.Schema = s - } - if param.Items != nil { - if r := w.RefCallback(¶m.Items.Ref); r != ¶m.Items.Ref { - param.Items.Ref = *r - } - } - - return param -} - -func (w *Walker) walkParameters(params []spec.Parameter) ([]spec.Parameter, bool) { - if params == nil { - return nil, false - } - - orig := params - cloned := false - clone := func() { - if !cloned { - cloned = true - params = make([]spec.Parameter, len(params)) - copy(params, orig) - } - } - - for i := range params { - if s := w.walkParameter(¶ms[i]); s != ¶ms[i] { - clone() - params[i] = *s - } - } - - return params, cloned -} - -func (w *Walker) walkResponse(resp *spec.Response) *spec.Response { - if resp == nil { - return nil - } - - orig := resp - cloned := false - clone := func() { - if !cloned { - cloned = true - resp = &spec.Response{} - *resp = *orig - } - } - - if r := w.RefCallback(&resp.Ref); r != &resp.Ref { - clone() - resp.Ref = *r - } - if s := w.WalkSchema(resp.Schema); s != resp.Schema { - clone() - resp.Schema = s - } - - return resp -} - -func (w *Walker) walkResponses(resps *spec.Responses) *spec.Responses { - if resps == nil { - return nil - } - - orig := resps - cloned := false - clone := func() { - if !cloned { - cloned = true - resps = &spec.Responses{} - *resps = *orig - } - } - - if r := w.walkResponse(resps.ResponsesProps.Default); r != resps.ResponsesProps.Default { - clone() - resps.Default = r - } - - responsesCloned := false - for k, v := range resps.ResponsesProps.StatusCodeResponses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - resps.ResponsesProps.StatusCodeResponses = make(map[int]spec.Response, len(orig.StatusCodeResponses)) - for k2, v2 := range orig.StatusCodeResponses { - resps.ResponsesProps.StatusCodeResponses[k2] = v2 - } - } - resps.ResponsesProps.StatusCodeResponses[k] = *r - } - } - - return resps -} - -func (w *Walker) walkOperation(op *spec.Operation) *spec.Operation { - if op == nil { - return nil - } - - orig := op - cloned := false - clone := func() { - if !cloned { - cloned = true - op = &spec.Operation{} - *op = *orig - } - } - - parametersCloned := false - for i := range op.Parameters { 
- if s := w.walkParameter(&op.Parameters[i]); s != &op.Parameters[i] { - if !parametersCloned { - parametersCloned = true - clone() - op.Parameters = make([]spec.Parameter, len(orig.Parameters)) - copy(op.Parameters, orig.Parameters) - } - op.Parameters[i] = *s - } - } - - if r := w.walkResponses(op.Responses); r != op.Responses { - clone() - op.Responses = r - } - - return op -} - -func (w *Walker) walkPathItem(pathItem *spec.PathItem) *spec.PathItem { - if pathItem == nil { - return nil - } - - orig := pathItem - cloned := false - clone := func() { - if !cloned { - cloned = true - pathItem = &spec.PathItem{} - *pathItem = *orig - } - } - - if p, changed := w.walkParameters(pathItem.Parameters); changed { - clone() - pathItem.Parameters = p - } - if op := w.walkOperation(pathItem.Get); op != pathItem.Get { - clone() - pathItem.Get = op - } - if op := w.walkOperation(pathItem.Head); op != pathItem.Head { - clone() - pathItem.Head = op - } - if op := w.walkOperation(pathItem.Delete); op != pathItem.Delete { - clone() - pathItem.Delete = op - } - if op := w.walkOperation(pathItem.Options); op != pathItem.Options { - clone() - pathItem.Options = op - } - if op := w.walkOperation(pathItem.Patch); op != pathItem.Patch { - clone() - pathItem.Patch = op - } - if op := w.walkOperation(pathItem.Post); op != pathItem.Post { - clone() - pathItem.Post = op - } - if op := w.walkOperation(pathItem.Put); op != pathItem.Put { - clone() - pathItem.Put = op - } - - return pathItem -} - -func (w *Walker) walkPaths(paths *spec.Paths) *spec.Paths { - if paths == nil { - return nil - } - - orig := paths - cloned := false - clone := func() { - if !cloned { - cloned = true - paths = &spec.Paths{} - *paths = *orig - } - } - - pathsCloned := false - for k, v := range paths.Paths { - if p := w.walkPathItem(&v); p != &v { - if !pathsCloned { - pathsCloned = true - clone() - paths.Paths = make(map[string]spec.PathItem, len(orig.Paths)) - for k2, v2 := range orig.Paths { - paths.Paths[k2] = v2 - } - } - paths.Paths[k] = *p - } - } - - return paths -} - -func (w *Walker) WalkRoot(swagger *spec.Swagger) *spec.Swagger { - if swagger == nil { - return nil - } - - orig := swagger - cloned := false - clone := func() { - if !cloned { - cloned = true - swagger = &spec.Swagger{} - *swagger = *orig - } - } - - parametersCloned := false - for k, v := range swagger.Parameters { - if p := w.walkParameter(&v); p != &v { - if !parametersCloned { - parametersCloned = true - clone() - swagger.Parameters = make(map[string]spec.Parameter, len(orig.Parameters)) - for k2, v2 := range orig.Parameters { - swagger.Parameters[k2] = v2 - } - } - swagger.Parameters[k] = *p - } - } - - responsesCloned := false - for k, v := range swagger.Responses { - if r := w.walkResponse(&v); r != &v { - if !responsesCloned { - responsesCloned = true - clone() - swagger.Responses = make(map[string]spec.Response, len(orig.Responses)) - for k2, v2 := range orig.Responses { - swagger.Responses[k2] = v2 - } - } - swagger.Responses[k] = *r - } - } - - definitionsCloned := false - for k, v := range swagger.Definitions { - if s := w.WalkSchema(&v); s != &v { - if !definitionsCloned { - definitionsCloned = true - clone() - swagger.Definitions = make(spec.Definitions, len(orig.Definitions)) - for k2, v2 := range orig.Definitions { - swagger.Definitions[k2] = v2 - } - } - swagger.Definitions[k] = *s - } - } - - if swagger.Paths != nil { - if p := w.walkPaths(swagger.Paths); p != swagger.Paths { - clone() - swagger.Paths = p - } - } - - return swagger -} diff --git 
a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 699291f1d..1f62c6e77 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -32,6 +32,9 @@ type Encoding struct { // MarshalJSON is a custom marshal function that knows how to encode Encoding as JSON func (e *Encoding) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.EncodingProps) if err != nil { return nil, err @@ -43,6 +46,16 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *Encoding) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + EncodingProps encodingPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.EncodingProps = encodingPropsOmitZero(e.EncodingProps) + return opts.MarshalNext(enc, x) +} + func (e *Encoding) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) @@ -82,3 +95,11 @@ type EncodingProps struct { // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } + +type encodingPropsOmitZero struct { + ContentType string `json:"contentType,omitempty"` + Headers map[string]*Header `json:"headers,omitempty"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 03b872717..8834a92e6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -36,6 +36,9 @@ type Example struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (e *Example) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.Refable) if err != nil { return nil, err @@ -50,6 +53,17 @@ func (e *Example) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b1, b2, b3), nil } +func (e *Example) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ExampleProps `json:",inline"` + spec.Extensions + } + x.Ref = e.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExampleProps = e.ExampleProps + return opts.MarshalNext(enc, x) +} func (e *Example) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index e79956721..f0515496e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -39,6 +39,9 @@ type ExternalDocumentationProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(e) + } b1, err := json.Marshal(e.ExternalDocumentationProps) if err != nil { return nil, err @@ -50,6 +53,16 @@ 
func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *ExternalDocumentation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ExternalDocumentationProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.ExternalDocumentationProps = e.ExternalDocumentationProps + return opts.MarshalNext(enc, x) +} + func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, e) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index bc19dd48e..08b6246ce 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -35,6 +35,18 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ func(o *OpenAPI, c fuzz.Continue) { c.FuzzNoCustom(o) o.Version = "3.0.0" + for i, val := range o.SecurityRequirement { + if val == nil { + o.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(r *interface{}, c fuzz.Continue) { switch c.Intn(3) { @@ -169,6 +181,21 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ c.Fuzz(&v.ResponseProps) c.Fuzz(&v.VendorExtensible) }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + // Do not fuzz null values into the array. + for i, val := range v.SecurityRequirement { + if val == nil { + v.SecurityRequirement[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, func(v *spec.Extensions, c fuzz.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index ee5a30f79..9ea30628c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -36,6 +36,9 @@ type Header struct { // MarshalJSON is a custom marshal function that knows how to encode Header as JSON func (h *Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (h *Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (h *Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + HeaderProps headerPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = h.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = headerPropsOmitZero(h.HeaderProps) + return opts.MarshalNext(enc, x) +} + func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, h) @@ -109,3 +124,19 @@ type HeaderProps struct { // Examples of the header Examples map[string]*Example `json:"examples,omitempty"` } + +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
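
The headerPropsOmitZero type just below is one of several marshal-only mirror structs added in this bump: the same fields as HeaderProps, with omitempty swapped for jsonv2's omitzero on booleans and struct pointers. Because Go ignores struct tags when converting between identically-shaped structs, headerPropsOmitZero(h.HeaderProps) re-tags the value in a single conversion rather than field-by-field assignments, and that conversion stops compiling the moment the two field lists drift apart — which is what the comment above warns about. A reduced illustration (these types are invented, not the vendored ones):

package sketch

type props struct {
	Name     string `json:"name,omitempty"`
	Required bool   `json:"required,omitempty"` // v1 tag: a false value is dropped
}

// Marshal-only mirror: identical fields, different tags.
type propsOmitZero struct {
	Name     string `json:"name,omitempty"`
	Required bool   `json:"required,omitzero"` // v2 tag: same effect under jsonv2
}

// retag compiles only while the two field lists match exactly; struct tags are
// ignored for convertibility, so no per-field copying code is needed.
func retag(p props) propsOmitZero {
	return propsOmitZero(p)
}
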
+type headerPropsOmitZero struct { + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index d390e69bc..47eef1edb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -35,6 +35,9 @@ type MediaType struct { // MarshalJSON is a custom marshal function that knows how to encode MediaType as JSON func (m *MediaType) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(m) + } b1, err := json.Marshal(m.MediaTypeProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (e *MediaType) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + MediaTypeProps mediaTypePropsOmitZero `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(e.Extensions) + x.MediaTypeProps = mediaTypePropsOmitZero(e.MediaTypeProps) + return opts.MarshalNext(enc, x) +} + func (m *MediaType) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, m) @@ -84,3 +97,10 @@ type MediaTypeProps struct { // A map between a property name and its encoding information. The key, being the property name, MUST exist in the schema as a property. 
The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded Encoding map[string]*Encoding `json:"encoding,omitempty"` } + +type mediaTypePropsOmitZero struct { + Schema *spec.Schema `json:"schema,omitzero"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` + Encoding map[string]*Encoding `json:"encoding,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index 28230610b..f1e102547 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -35,6 +35,9 @@ type Operation struct { // MarshalJSON is a custom marshal function that knows how to encode Operation as JSON func (o *Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -46,6 +49,16 @@ func (o *Operation) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (o *Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + spec.Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { @@ -95,3 +108,17 @@ type OperationProps struct { // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } + +type operationPropsOmitZero struct { + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + Description string `json:"description,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + OperationId string `json:"operationId,omitempty"` + Parameters []*Parameter `json:"parameters,omitempty"` + RequestBody *RequestBody `json:"requestBody,omitzero"` + Responses *Responses `json:"responses,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + Servers []*Server `json:"servers,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 613da71a6..ada7edb63 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -36,6 +36,9 @@ type Parameter struct { // MarshalJSON is a custom marshal function that knows how to encode Parameter as JSON func (p *Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + ParameterProps parameterPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParameterProps = 
parameterPropsOmitZero(p.ParameterProps) + return opts.MarshalNext(enc, x) +} + func (p *Parameter) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) @@ -114,3 +129,19 @@ type ParameterProps struct { // Examples of the parameter's potential value. Each example SHOULD contain a value in the correct format as specified in the parameter encoding Examples map[string]*Example `json:"examples,omitempty"` } + +type parameterPropsOmitZero struct { + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitzero"` + Deprecated bool `json:"deprecated,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` + Style string `json:"style,omitempty"` + Explode bool `json:"explode,omitzero"` + AllowReserved bool `json:"allowReserved,omitzero"` + Schema *spec.Schema `json:"schema,omitzero"` + Content map[string]*MediaType `json:"content,omitempty"` + Example interface{} `json:"example,omitempty"` + Examples map[string]*Example `json:"examples,omitempty"` +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index 40d9061ac..16fbbb4dd 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -35,15 +35,41 @@ type Paths struct { // MarshalJSON is a custom marshal function that knows how to encode Paths as JSON func (p *Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.Paths) + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } + b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err } - b2, err := json.Marshal(p.VendorExtensible) + + pths := make(map[string]*Path) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) if err != nil { return nil, err } - return swag.ConcatJSON(b1, b2), nil + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +func (p *Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) } // UnmarshalJSON hydrates this items instance with the data from JSON @@ -144,6 +170,9 @@ type Path struct { // MarshalJSON is a custom marshal function that knows how to encode Path as JSON func (p *Path) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -159,6 +188,18 @@ func (p *Path) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (p *Path) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + PathProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathProps = p.PathProps + return opts.MarshalNext(enc, x) +} + func (p *Path) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, p) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 
33267ce67..6f8607e40 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -36,6 +36,9 @@ type RequestBody struct { // MarshalJSON is a custom marshal function that knows how to encode RequestBody as JSON func (r *RequestBody) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -51,6 +54,18 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *RequestBody) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + RequestBodyProps requestBodyPropsOmitZero `json:",inline"` + spec.Extensions + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.RequestBodyProps = requestBodyPropsOmitZero(r.RequestBodyProps) + return opts.MarshalNext(enc, x) +} + func (r *RequestBody) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -77,6 +92,12 @@ type RequestBodyProps struct { Required bool `json:"required,omitempty"` } +type requestBodyPropsOmitZero struct { + Description string `json:"description,omitempty"` + Content map[string]*MediaType `json:"content,omitempty"` + Required bool `json:"required,omitzero"` +} + func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { var x struct { spec.Extensions diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index 95b388e6c..73e241fdc 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -37,6 +37,9 @@ type Responses struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (r *Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -48,6 +51,25 @@ func (r *Responses) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitzero"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + func (r *Responses) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -179,6 +201,9 @@ type Response struct { // MarshalJSON is a custom marshal function that knows how to encode Response as JSON func (r *Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -194,6 +219,18 @@ func (r *Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x 
struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + ResponseProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = r.ResponseProps + return opts.MarshalNext(enc, x) +} + func (r *Response) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) @@ -247,6 +284,9 @@ type Link struct { // MarshalJSON is a custom marshal function that knows how to encode Link as JSON func (r *Link) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.Refable) if err != nil { return nil, err @@ -262,6 +302,18 @@ func (r *Link) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r *Link) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + spec.Extensions + LinkProps `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.LinkProps = r.LinkProps + return opts.MarshalNext(enc, x) +} + func (r *Link) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, r) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index edf7e6de3..dd1e98ed8 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -32,6 +34,9 @@ type SecurityScheme struct { // MarshalJSON is a custom marshal function that knows how to encode SecurityScheme as JSON func (s *SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -47,6 +52,18 @@ func (s *SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (s *SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + SecuritySchemeProps `json:",inline"` + spec.Extensions + } + x.Ref = s.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON hydrates this items instance with the data from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index d5df0a781..654a42c06 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -41,6 +41,9 @@ type ServerProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *Server) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (s *Server) MarshalJSON() 
([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *Server) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerProps = s.ServerProps + return opts.MarshalNext(enc, x) +} + func (s *Server) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) @@ -96,6 +109,9 @@ type ServerVariableProps struct { // MarshalJSON is a custom marshal function that knows how to encode Responses as JSON func (s *ServerVariable) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.ServerVariableProps) if err != nil { return nil, err @@ -107,6 +123,16 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s *ServerVariable) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + ServerVariableProps `json:",inline"` + spec.Extensions + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.ServerVariableProps = s.ServerVariableProps + return opts.MarshalNext(enc, x) +} + func (s *ServerVariable) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshalingV3 { return jsonv2.Unmarshal(data, s) diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index bed096fb7..5db819c7f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -36,6 +36,8 @@ type OpenAPI struct { Servers []*Server `json:"servers,omitempty"` // Components hold various schemas for the specification Components *Components `json:"components,omitempty"` + // SecurityRequirement holds a declaration of which security mechanisms can be used across the API + SecurityRequirement []map[string][]string `json:"security,omitempty"` // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } @@ -48,3 +50,26 @@ func (o *OpenAPI) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &p) } + +func (o *OpenAPI) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshalingV3 { + return internal.DeterministicMarshal(o) + } + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + return json.Marshal(&p) +} + +func (o *OpenAPI) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type OpenAPIOmitZero struct { + Version string `json:"openapi"` + Info *spec.Info `json:"info"` + Paths *Paths `json:"paths,omitzero"` + Servers []*Server `json:"servers,omitempty"` + Components *Components `json:"components,omitzero"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + } + x := (*OpenAPIOmitZero)(o) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go deleted file mode 100644 index c66f998f5..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go +++ /dev/null @@ -1,502 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec - -import ( - "github.com/go-openapi/jsonreference" - "github.com/google/go-cmp/cmp" - fuzz "github.com/google/gofuzz" -) - -var SwaggerFuzzFuncs []interface{} = []interface{}{ - func(v *Responses, c fuzz.Continue) { - c.FuzzNoCustom(v) - if v.Default != nil { - // Check if we hit maxDepth and left an incomplete value - if v.Default.Description == "" { - v.Default = nil - v.StatusCodeResponses = nil - } - } - - // conversion has no way to discern empty statusCodeResponses from - // nil, since "default" is always included in the map. - // So avoid empty responses list - if len(v.StatusCodeResponses) == 0 { - v.StatusCodeResponses = nil - } - }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v != nil { - // force non-nil - v.Responses = &Responses{} - c.Fuzz(v.Responses) - - v.Schemes = nil - if c.RandBool() { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandBool() { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. - for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - } - }, - func(v map[int]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Prevent negative numbers - num := c.Intn(4) - for i := 0; i < num+2; i++ { - val := Response{} - c.Fuzz(&val) - - val.Description = c.RandString() + "x" - v[100*(i+1)+c.Intn(100)] = val - } - }, - func(v map[string]PathItem, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - num := c.Intn(5) - for i := 0; i < num+2; i++ { - val := PathItem{} - c.Fuzz(&val) - - // Ref params are only allowed in certain locations, so - // possibly add a few to PathItems - numRefsToAdd := c.Intn(5) - for i := 0; i < numRefsToAdd; i++ { - theRef := Parameter{} - c.Fuzz(&theRef.Refable) - - val.Parameters = append(val.Parameters, theRef) - } - - v["/"+c.RandString()] = val - } - }, - func(v *SchemaOrArray, c fuzz.Continue) { - *v = SchemaOrArray{} - // gnostic parser just doesn't support more - // than one Schema here - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - }, - func(v *SchemaOrBool, c fuzz.Continue) { - *v = SchemaOrBool{} - - if c.RandBool() { - v.Allows = c.RandBool() - } else { - v.Schema = &Schema{} - v.Allows = true - c.Fuzz(&v.Schema) - } - }, - func(v map[string]Response, c fuzz.Continue) { - n := 0 - c.Fuzz(&n) - if n == 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - return - } - - // Response definitions are not allowed to - // be refs - for i := 0; i < c.Intn(5)+1; i++ { - resp := &Response{} - - c.Fuzz(resp) - resp.Ref = Ref{} - resp.Description = c.RandString() + "x" - - // Response refs 
are not vendor extensible by gnostic - resp.VendorExtensible.Extensions = nil - v[c.RandString()+"x"] = *resp - } - }, - func(v *Header, c fuzz.Continue) { - if v != nil { - c.FuzzNoCustom(v) - - // descendant Items of Header may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Ref, c fuzz.Continue) { - *v = Ref{} - v.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - }, - func(v *Response, c fuzz.Continue) { - *v = Response{} - if c.RandBool() { - v.Ref = Ref{} - v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) - } else { - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.Schema) - c.Fuzz(&v.ResponseProps) - - v.Headers = nil - v.Ref = Ref{} - - n := 0 - c.Fuzz(&n) - if n != 0 { - // Test that fuzzer is not at maxDepth so we do not - // end up with empty elements - num := c.Intn(4) - for i := 0; i < num; i++ { - if v.Headers == nil { - v.Headers = make(map[string]Header) - } - hdr := Header{} - c.Fuzz(&hdr) - if hdr.Type == "" { - // hit maxDepth, just abort trying to make haders - v.Headers = nil - break - } - v.Headers[c.RandString()+"x"] = hdr - } - } else { - v.Headers = nil - } - } - - v.Description = c.RandString() + "x" - - // Gnostic parses empty as nil, so to keep avoid putting empty - if len(v.Headers) == 0 { - v.Headers = nil - } - }, - func(v **Info, c fuzz.Continue) { - // Info is never nil - *v = &Info{} - c.FuzzNoCustom(*v) - - (*v).Title = c.RandString() + "x" - }, - func(v *Extensions, c fuzz.Continue) { - // gnostic parser only picks up x- vendor extensions - numChildren := c.Intn(5) - for i := 0; i < numChildren; i++ { - if *v == nil { - *v = Extensions{} - } - (*v)["x-"+c.RandString()] = c.RandString() - } - }, - func(v *Swagger, c fuzz.Continue) { - c.FuzzNoCustom(v) - - if v.Paths == nil { - // Force paths non-nil since it does not have omitempty in json tag. - // This means a perfect roundtrip (via json) is impossible, - // since we can't tell the difference between empty/unspecified paths - v.Paths = &Paths{} - c.Fuzz(v.Paths) - } - - v.Swagger = "2.0" - - // Gnostic support serializing ID at all - // unavoidable data loss - v.ID = "" - - v.Schemes = nil - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "http") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "https") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "ws") - } - - if c.RandUint64()%2 == 1 { - v.Schemes = append(v.Schemes, "wss") - } - - // Gnostic unconditionally makes security values non-null - // So do not fuzz null values into the array. 
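
These deleted fuzz funcs all follow one pattern: let gofuzz fill the value, then normalize any shape the gnostic round-trip cannot represent (the loop just below, for example, replaces null security entries with empty ones). A reduced sketch of the same gofuzz idiom, with an invented type:

package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type doc struct {
	Title    string
	Security []map[string][]string
}

func main() {
	f := fuzz.New().Funcs(
		func(d *doc, c fuzz.Continue) {
			c.FuzzNoCustom(d)              // default fuzzing first, as FuzzNoCustom is used above
			d.Title = c.RandString() + "x" // force non-empty, matching the RandString()+"x" trick above
			for i, val := range d.Security {
				if val == nil {
					d.Security[i] = make(map[string][]string) // no null entries survive
				}
			}
		},
	)
	var d doc
	f.Fuzz(&d)
	fmt.Printf("%+v\n", d)
}
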
- for i, val := range v.Security { - if val == nil { - v.Security[i] = make(map[string][]string) - } - - for k, v := range val { - if v == nil { - val[k] = make([]string, 0) - } - } - } - }, - func(v *SecurityScheme, c fuzz.Continue) { - v.Description = c.RandString() + "x" - c.Fuzz(&v.VendorExtensible) - - switch c.Intn(3) { - case 0: - v.Type = "basic" - case 1: - v.Type = "apiKey" - switch c.Intn(2) { - case 0: - v.In = "header" - case 1: - v.In = "query" - default: - panic("unreachable") - } - v.Name = "x" + c.RandString() - case 2: - v.Type = "oauth2" - - switch c.Intn(4) { - case 0: - v.Flow = "accessCode" - v.TokenURL = "https://" + c.RandString() - v.AuthorizationURL = "https://" + c.RandString() - case 1: - v.Flow = "application" - v.TokenURL = "https://" + c.RandString() - case 2: - v.Flow = "implicit" - v.AuthorizationURL = "https://" + c.RandString() - case 3: - v.Flow = "password" - v.TokenURL = "https://" + c.RandString() - default: - panic("unreachable") - } - c.Fuzz(&v.Scopes) - default: - panic("unreachable") - } - }, - func(v *interface{}, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *string, c fuzz.Continue) { - *v = c.RandString() + "x" - }, - func(v *ExternalDocumentation, c fuzz.Continue) { - v.Description = c.RandString() + "x" - v.URL = c.RandString() + "x" - }, - func(v *SimpleSchema, c fuzz.Continue) { - c.FuzzNoCustom(v) - - switch c.Intn(5) { - case 0: - v.Type = "string" - case 1: - v.Type = "number" - case 2: - v.Type = "boolean" - case 3: - v.Type = "integer" - case 4: - v.Type = "array" - default: - panic("unreachable") - } - - switch c.Intn(5) { - case 0: - v.CollectionFormat = "csv" - case 1: - v.CollectionFormat = "ssv" - case 2: - v.CollectionFormat = "tsv" - case 3: - v.CollectionFormat = "pipes" - case 4: - v.CollectionFormat = "" - default: - panic("unreachable") - } - - // None of the types which include SimpleSchema in our definitions - // actually support "example" in the official spec - v.Example = nil - - // unsupported by openapi - v.Nullable = false - }, - func(v *int64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0 { - *v = 1 - } - }, - func(v *float64, c fuzz.Continue) { - c.Fuzz(v) - - // Gnostic does not differentiate between 0 and non-specified - // so avoid using 0 for fuzzer - if *v == 0.0 { - *v = 1.0 - } - }, - func(v *Parameter, c fuzz.Continue) { - if v == nil { - return - } - c.Fuzz(&v.VendorExtensible) - if c.RandBool() { - // body param - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - v.In = "body" - c.Fuzz(&v.Description) - c.Fuzz(&v.Required) - - v.Schema = &Schema{} - c.Fuzz(&v.Schema) - - } else { - c.Fuzz(&v.SimpleSchema) - c.Fuzz(&v.CommonValidations) - v.AllowEmptyValue = false - v.Description = c.RandString() + "x" - v.Name = c.RandString() + "x" - - switch c.Intn(4) { - case 0: - // Header param - v.In = "header" - case 1: - // Form data param - v.In = "formData" - v.AllowEmptyValue = c.RandBool() - case 2: - // Query param - v.In = "query" - v.AllowEmptyValue = c.RandBool() - case 3: - // Path param - v.In = "path" - v.Required = true - default: - panic("unreachable") - } - - // descendant Items of Parameter may not be refs - cur := v.Items - for cur != nil { - cur.Ref = Ref{} - cur = cur.Items - } - } - }, - func(v *Schema, c fuzz.Continue) { - if c.RandBool() { - // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) - - 
c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) - v.Type = StringOrArray{"file"} - - } else { - // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - // c.Fuzz(&v.ExtraProps) - // ExtraProps will not roundtrip - gnostic throws out - // unrecognized keys - } - - // Not supported by official openapi v2 spec - // and stripped by k8s apiserver - v.ID = "" - v.AnyOf = nil - v.OneOf = nil - v.Not = nil - v.Nullable = false - v.AdditionalItems = nil - v.Schema = "" - v.PatternProperties = nil - v.Definitions = nil - v.Dependencies = nil - }, -} - -var SwaggerDiffOptions = []cmp.Option{ - // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields - cmp.Comparer(func(a Ref, b Ref) bool { - return a.String() == b.String() - }), -} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/k8s.io/kubernetes/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS b/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS deleted file mode 100644 index 688ea8bd0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -labels: - - sig/apps diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go deleted file mode 100644 index 60cff22b9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file should be consistent with pkg/api/v1/annotation_key_constants.go. - -package core - -const ( - // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy - // webhook backend fails. - ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - - // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods - MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - // Deprecated: set a pod security context `seccompProfile` field. 
- SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - // Deprecated: set a container security context `seccompProfile` field. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. - // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. - SeccompProfileRuntimeDefault string = "runtime/default" - - // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. - // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. - DeprecatedSeccompProfileDockerDefault string = "docker/default" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // NonConvertibleAnnotationPrefix annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" - - kubectlPrefix = "kubectl.kubernetes.io/" - - // LastAppliedConfigAnnotation is the annotation used to store the previous - // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. - LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" - - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" - - // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that - // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') - // of the last change, of some Pod or Service object, that triggered the endpoints object change. - // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints - // controller at T1, and the Endpoints object was changed at T2, the - // EndpointsLastChangeTriggerTime would be set to T0. - // - // The "endpoints change trigger" here means any Pod or Service change that resulted in the - // Endpoints object change. - // - // Given the definition of the "endpoints change trigger", please note that this annotation will - // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the - // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's - // already set). 
- // - // This annotation will be used to compute the in-cluster network programming latency SLI, see - // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md - EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" - - // EndpointsOverCapacity will be set on an Endpoints resource when it - // exceeds the maximum capacity of 1000 addresses. Initially the Endpoints - // controller will set this annotation with a value of "warning". In a - // future release, the controller may set this annotation with a value of - // "truncated" to indicate that any addresses exceeding the limit of 1000 - // have been truncated from the Endpoints resource. - EndpointsOverCapacity = "endpoints.kubernetes.io/over-capacity" - - // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated - // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. - // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or - // CSI Backend for a volume plugin on a specific node. - MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" - - // PodDeletionCost can be used to set to an int32 that represent the cost of deleting - // a pod compared to other pods belonging to the same ReplicaSet. Pods with lower - // deletion cost are preferred to be deleted before pods with higher deletion cost. - // Note that this is honored on a best-effort basis, and so it does not offer guarantees on - // pod deletion order. - // The implicit deletion cost for pods that don't set the annotation is 0, negative values are permitted. - // - // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled. - PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" - - // DeprecatedAnnotationTopologyAwareHints can be used to enable or disable - // Topology Aware Hints for a Service. This may be set to "Auto" or - // "Disabled". Any other value is treated as "Disabled". This annotation has - // been deprecated in favor of the `service.kubernetes.io/topology-mode` - // annotation which also allows "Auto" and "Disabled", but is not limited to - // those (it's open ended to provide room for experimentation while we - // pursue configuration for topology via specification). When both - // `service.kubernetes.io/topology-aware-hints` and - // `service.kubernetes.io/topology-mode` annotations are set, the value of - // `service.kubernetes.io/topology-aware-hints` has precedence. - DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints" - - // AnnotationTopologyMode can be used to enable or disable Topology Aware - // Routing for a Service. Well known values are "Auto" and "Disabled". - // Implementations may choose to develop new topology approaches, exposing - // them with domain-prefixed values. For example, "example.com/lowest-rtt" - // could be a valid implementation-specific value for this annotation. These - // heuristics will often populate topology hints on EndpointSlices, but that - // is not a requirement. 
-	AnnotationTopologyMode = "service.kubernetes.io/topology-mode"
-)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
deleted file mode 100644
index a404263e7..000000000
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package helper
-
-import (
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/conversion"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/util/validation"
-	"k8s.io/kubernetes/pkg/apis/core"
-)
-
-// IsHugePageResourceName returns true if the resource name has the huge page
-// resource prefix.
-func IsHugePageResourceName(name core.ResourceName) bool {
-	return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix)
-}
-
-// IsHugePageResourceValueDivisible returns true if the resource value of storage
-// is an integer multiple of the page size.
-func IsHugePageResourceValueDivisible(name core.ResourceName, quantity resource.Quantity) bool {
-	pageSize, err := HugePageSizeFromResourceName(name)
-	if err != nil {
-		return false
-	}
-
-	if pageSize.Sign() <= 0 || pageSize.MilliValue()%int64(1000) != int64(0) {
-		return false
-	}
-
-	return quantity.Value()%pageSize.Value() == 0
-}
-
-// IsQuotaHugePageResourceName returns true if the resource name has the quota
-// related huge page resource prefix.
-func IsQuotaHugePageResourceName(name core.ResourceName) bool {
-	return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix)
-}
-
-// HugePageResourceName returns a ResourceName with the canonical hugepage
-// prefix prepended for the specified page size. The page size is converted
-// to its canonical representation.
-func HugePageResourceName(pageSize resource.Quantity) core.ResourceName {
-	return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String()))
-}
-
-// HugePageSizeFromResourceName returns the page size for the specified huge page
-// resource name. If the specified input is not a valid huge page resource name
-// an error is returned.
-func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) {
-	if !IsHugePageResourceName(name) {
-		return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name)
-	}
-	pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix)
-	return resource.ParseQuantity(pageSize)
-}
-
-// NonConvertibleFields iterates over the provided map and returns only the
-// keys with the "non-convertible.kubernetes.io" prefix.
-func NonConvertibleFields(annotations map[string]string) map[string]string { - nonConvertibleKeys := map[string]string{} - for key, value := range annotations { - if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { - nonConvertibleKeys[key] = value - } - } - return nonConvertibleKeys -} - -// Semantic can do semantic deep equality checks for core objects. -// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. - // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. - return a.Cmp(b) == 0 - }, - func(a, b metav1.MicroTime) bool { - return a.UTC() == b.UTC() - }, - func(a, b metav1.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) - -var standardResourceQuotaScopes = sets.NewString( - string(core.ResourceQuotaScopeTerminating), - string(core.ResourceQuotaScopeNotTerminating), - string(core.ResourceQuotaScopeBestEffort), - string(core.ResourceQuotaScopeNotBestEffort), - string(core.ResourceQuotaScopePriorityClass), -) - -// IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) || str == string(core.ResourceQuotaScopeCrossNamespacePodAffinity) -} - -var podObjectCountQuotaResources = sets.NewString( - string(core.ResourcePods), -) - -var podComputeQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), -) - -// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { - switch scope { - case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort, - core.ResourceQuotaScopePriorityClass, core.ResourceQuotaScopeCrossNamespacePodAffinity: - return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case core.ResourceQuotaScopeBestEffort: - return podObjectCountQuotaResources.Has(resource) - default: - return true - } -} - -var standardContainerResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) -} - -// IsExtendedResourceName returns true if: -// 1. the resource name is not in the default namespace; -// 2. resource name does not have "requests." prefix, -// to avoid confusion with the convention in quota -// 3. 
it satisfies the rules in IsQualifiedName() after converted into quota resource name -func IsExtendedResourceName(name core.ResourceName) bool { - if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) { - return false - } - // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name - nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name)) - if errs := validation.IsQualifiedName(nameForQuota); len(errs) != 0 { - return false - } - return true -} - -// IsNativeResource returns true if the resource name is in the -// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are -// implicitly in the kubernetes.io/ namespace. -func IsNativeResource(name core.ResourceName) bool { - return !strings.Contains(string(name), "/") || - strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) -} - -// IsOvercommitAllowed returns true if the resource is in the default -// namespace and is not hugepages. -func IsOvercommitAllowed(name core.ResourceName) bool { - return IsNativeResource(name) && - !IsHugePageResourceName(name) -} - -var standardLimitRangeTypes = sets.NewString( - string(core.LimitTypePod), - string(core.LimitTypeContainer), - string(core.LimitTypePersistentVolumeClaim), -) - -// IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsStorage), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceConfigMaps), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsStandardQuotaResourceName returns true if the resource is known to -// the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - -var standardResources = sets.NewString( - string(core.ResourceCPU), - string(core.ResourceMemory), - string(core.ResourceEphemeralStorage), - string(core.ResourceRequestsCPU), - string(core.ResourceRequestsMemory), - string(core.ResourceRequestsEphemeralStorage), - string(core.ResourceLimitsCPU), - string(core.ResourceLimitsMemory), - string(core.ResourceLimitsEphemeralStorage), - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceStorage), - string(core.ResourceRequestsStorage), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) -} - 
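The hugepage helpers deleted above build and parse canonical resource names such as "hugepages-2Mi". A rough standalone sketch of that round trip follows; the lowercase helpers and the inlined "hugepages-" prefix are illustrative stand-ins for the removed functions and core.ResourceHugePagesPrefix, assuming only k8s.io/apimachinery remains importable once this package leaves the vendor tree:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// hugePagesPrefix mirrors the deleted core.ResourceHugePagesPrefix constant.
const hugePagesPrefix = "hugepages-"

// hugePageResourceName renders the page size in its canonical form, so a
// quantity of 2097152 bytes becomes "hugepages-2Mi".
func hugePageResourceName(pageSize resource.Quantity) string {
	return fmt.Sprintf("%s%s", hugePagesPrefix, pageSize.String())
}

// hugePageSizeFromResourceName rejects names without the hugepage prefix and
// parses the remainder back into a quantity.
func hugePageSizeFromResourceName(name string) (resource.Quantity, error) {
	if !strings.HasPrefix(name, hugePagesPrefix) {
		return resource.Quantity{}, fmt.Errorf("%s is not a hugepage resource name", name)
	}
	return resource.ParseQuantity(strings.TrimPrefix(name, hugePagesPrefix))
}

func main() {
	name := hugePageResourceName(resource.MustParse("2Mi"))
	fmt.Println(name) // hugepages-2Mi

	// Divisibility as checked by IsHugePageResourceValueDivisible above:
	// a 1Gi request is an integer multiple of the 2Mi page size.
	pageSize, _ := hugePageSizeFromResourceName(name)
	request := resource.MustParse("1Gi")
	fmt.Println(request.Value()%pageSize.Value() == 0) // true
}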
-var integerResources = sets.NewString( - string(core.ResourcePods), - string(core.ResourceQuotas), - string(core.ResourceServices), - string(core.ResourceReplicationControllers), - string(core.ResourceSecrets), - string(core.ResourceConfigMaps), - string(core.ResourcePersistentVolumeClaims), - string(core.ResourceServicesNodePorts), - string(core.ResourceServicesLoadBalancers), -) - -// IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) -} - -// IsServiceIPSet aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *core.Service) bool { - // This function assumes that the service is semantically validated - // it does not test if the IP is valid, just makes sure that it is set. - return len(service.Spec.ClusterIP) > 0 && - service.Spec.ClusterIP != core.ClusterIPNone -} - -var standardFinalizers = sets.NewString( - string(core.FinalizerKubernetes), - metav1.FinalizerOrphanDependents, - metav1.FinalizerDeleteDependents, -) - -// IsStandardFinalizerName checks if the input string is a standard finalizer name -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX,RWOP. -func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if ContainsAccessMode(modes, core.ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if ContainsAccessMode(modes, core.ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if ContainsAccessMode(modes, core.ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - if ContainsAccessMode(modes, core.ReadWriteOncePod) { - modesStr = append(modesStr, "RWOP") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []core.PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, core.ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, core.ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, core.ReadWriteMany) - case s == "RWOP": - accessModes = append(accessModes, core.ReadWriteOncePod) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { - accessModes := []core.PersistentVolumeAccessMode{} - for _, m := range modes { - if !ContainsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func ContainsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -func ClaimContainsAllocatedResources(pvc *core.PersistentVolumeClaim) bool { - if pvc == nil { - return false - } - - if pvc.Status.AllocatedResources != nil { - return true - } - return 
false -} - -func ClaimContainsAllocatedResourceStatus(pvc *core.PersistentVolumeClaim) bool { - if pvc == nil { - return false - } - - if pvc.Status.AllocatedResourceStatuses != nil { - return true - } - return false -} - -// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in core. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { - var tolerations []core.Toleration - if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) - if err != nil { - return tolerations, err - } - } - return tolerations, nil -} - -// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. -// Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { - podTolerations := pod.Spec.Tolerations - - var newTolerations []core.Toleration - updated := false - for i := range podTolerations { - if toleration.MatchToleration(&podTolerations[i]) { - if Semantic.DeepEqual(toleration, podTolerations[i]) { - return false - } - newTolerations = append(newTolerations, *toleration) - updated = true - continue - } - - newTolerations = append(newTolerations, podTolerations[i]) - } - - if !updated { - newTolerations = append(newTolerations, *toleration) - } - - pod.Spec.Tolerations = newTolerations - return true -} - -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in core. -func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { - var taints []core.Taint - if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) - if err != nil { - return []core.Taint{}, err - } - } - return taints, nil -} - -// GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *core.PersistentVolume) string { - // Use beta annotation first - if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { - return class - } - - return volume.Spec.StorageClassName -} - -// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was -// requested, it returns "". -func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { - // Use beta annotation first - if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { - return class - } - - if claim.Spec.StorageClassName != nil { - return *claim.Spec.StorageClassName - } - - return "" -} - -// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { - // Use beta annotation first - if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { - return true - } - - if claim.Spec.StorageClassName != nil { - return true - } - - return false -} - -// GetDeletionCostFromPodAnnotations returns the integer value of pod-deletion-cost. Returns 0 -// if not set or the value is invalid. -func GetDeletionCostFromPodAnnotations(annotations map[string]string) (int32, error) { - if value, exist := annotations[core.PodDeletionCost]; exist { - // values that start with plus sign (e.g, "+10") or leading zeros (e.g., "008") are not valid. 
- if !validFirstDigit(value) { - return 0, fmt.Errorf("invalid value %q", value) - } - - i, err := strconv.ParseInt(value, 10, 32) - if err != nil { - // make sure we default to 0 on error. - return 0, err - } - return int32(i), nil - } - return 0, nil -} - -func validFirstDigit(str string) bool { - if len(str) == 0 { - return false - } - return str[0] == '-' || (str[0] == '0' && str == "0") || (str[0] >= '1' && str[0] <= '9') -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go deleted file mode 100644 index 46702cb46..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import "encoding/json" - -// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations -// to prevent anyone from marshaling these internal structs. - -var _ = json.Marshaler(&AvoidPods{}) -var _ = json.Unmarshaler(&AvoidPods{}) - -// MarshalJSON panics to prevent marshalling of internal structs -func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } - -// UnmarshalJSON panics to prevent unmarshalling of internal structs -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go deleted file mode 100644 index 60f7e8a88..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. 
- -package core - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// SetGroupVersionKind sets the API version and kind of the object reference -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind returns the API version and kind of the object reference -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -// GetObjectKind returns the kind of object reference -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go deleted file mode 100644 index 882f31795..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder object to register various known types - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - - // AddToScheme represents a func that can be used to apply all the registered - // funcs in a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - 
&ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go deleted file mode 100644 index bde1e24ca..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package core - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -func (rn ResourceName) String() string { - return string(rn) -} - -// CPU returns the CPU limit if specified. -func (rl *ResourceList) CPU() *resource.Quantity { - return rl.Name(ResourceCPU, resource.DecimalSI) -} - -// Memory returns the Memory limit if specified. -func (rl *ResourceList) Memory() *resource.Quantity { - return rl.Name(ResourceMemory, resource.BinarySI) -} - -// Storage returns the Storage limit if specified. -func (rl *ResourceList) Storage() *resource.Quantity { - return rl.Name(ResourceStorage, resource.BinarySI) -} - -// Pods returns the list of pods -func (rl *ResourceList) Pods() *resource.Quantity { - return rl.Name(ResourcePods, resource.DecimalSI) -} - -// StorageEphemeral returns the list of ephemeral storage volumes, if any -func (rl *ResourceList) StorageEphemeral() *resource.Quantity { - return rl.Name(ResourceEphemeralStorage, resource.BinarySI) -} - -// Name returns the resource with name if specified, otherwise it returns a nil quantity with default format. -func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity { - if val, ok := (*rl)[name]; ok { - return &val - } - return &resource.Quantity{Format: defaultFormat} -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go deleted file mode 100644 index 2c800de9b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package core - -import "fmt" - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. 
-func (t *Taint) MatchTaint(taintToMatch Taint) bool {
-	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
-}
-
-// ToString converts taint struct to string in format '<key>=<value>:<effect>',
-// '<key>=<value>:', '<key>:<effect>', or '<key>'.
-func (t *Taint) ToString() string {
-	if len(t.Effect) == 0 {
-		if len(t.Value) == 0 {
-			return fmt.Sprintf("%v", t.Key)
-		}
-		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
-	}
-	if len(t.Value) == 0 {
-		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
-	}
-	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
deleted file mode 100644
index 1dfbc9f1b..000000000
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-//TODO: consider making these methods functions, because we don't want helper
-//functions in the k8s.io/api repo.
-
-package core
-
-// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
-// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
-// TODO: uniqueness check for tolerations in api validations.
-func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
-	return t.Key == tolerationToMatch.Key &&
-		t.Effect == tolerationToMatch.Effect &&
-		t.Operator == tolerationToMatch.Operator &&
-		t.Value == tolerationToMatch.Value
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
deleted file mode 100644
index 75c68af62..000000000
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
+++ /dev/null
@@ -1,6148 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package core
-
-import (
-	"k8s.io/apimachinery/pkg/api/resource"
-	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/intstr"
-)
-
-const (
-	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
-	NamespaceDefault = "default"
-	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
-	NamespaceAll = ""
-	// NamespaceNone is the argument for a context when there is no namespace.
- NamespaceNone = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic = "kube-public" - // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) - NamespaceNodeLease = "kube-node-lease" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - // into the Pod's container. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. 
- // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelet's host machine. - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // +optional - StorageOS *StorageOSVolumeSource - // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. - // +optional - CSI *CSIVolumeSource - // Ephemeral represents a volume that is handled by a cluster storage driver. - // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - // and deleted when the pod is removed. 
- // - // Use this if: - // a) the volume is only needed while the pod runs, - // b) features of normal volumes like restoring from snapshot or capacity - // tracking are needed, - // c) the storage driver is specified through a storage class, and - // d) the storage driver supports dynamic volume provisioning through - // a PersistentVolumeClaim (see EphemeralVolumeSource for more - // information on the connection between this volume type - // and PersistentVolumeClaim). - // - // Use PersistentVolumeClaim or one of the vendor-specific - // APIs for volumes that persist for longer than the lifecycle - // of an individual pod. - // - // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - // be used that way - see the documentation of the driver for - // more information. - // - // A pod can use both types of ephemeral volumes and - // persistent volumes at the same time. - // - // +optional - Ephemeral *EphemeralVolumeSource -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsPersistentVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDPersistentVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIPersistentVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexPersistentVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelet's host machine. - // +optional - Cinder *CinderPersistentVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
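
// A minimal sketch (not part of the vendored file) of populating the one-of
// VolumeSource union defined above; only one member may be set. Assumes this
// package is imported as core ("k8s.io/kubernetes/pkg/apis/core"); the volume
// name is illustrative.
//
//	vol := core.Volume{
//		Name: "scratch", // must be a DNS_LABEL, unique within the pod
//		VolumeSource: core.VolumeSource{
//			// Exactly one member is set; here, a pod-lifetime temp dir.
//			EmptyDir: &core.EmptyDirVolumeSource{},
//		},
//	}
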
- // +optional - AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelet's host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelet's host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOPersistentVolumeSource - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://examples.k8s.io/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource - // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver. - // +optional - CSI *CSIPersistentVolumeSource -} - -// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's deprecated and will be removed in a future release. (#51440) - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolume struct captures the details of the implementation of PV storage -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. - // +optional - Status PersistentVolumeStatus -} - -// PersistentVolumeSpec has most of the details required to define a persistent volume -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. 
- // +optional
- PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy
- // Name of StorageClass to which this persistent volume belongs. Empty value
- // means that this volume does not belong to any StorageClass.
- // +optional
- StorageClassName string
- // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
- // simply fail if one is invalid.
- // +optional
- MountOptions []string
- // volumeMode defines if a volume is intended to be used with a formatted filesystem
- // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
- // +optional
- VolumeMode *PersistentVolumeMode
- // NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
- // This field influences the scheduling of pods that use this volume.
- // +optional
- NodeAffinity *VolumeNodeAffinity
-}
-
-// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
-type VolumeNodeAffinity struct {
- // Required specifies hard node constraints that must be met.
- Required *NodeSelector
-}
-
-// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes
-type PersistentVolumeReclaimPolicy string
-
-const (
- // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
- // The volume plugin must support Recycling.
- // DEPRECATED: The Recycle reclaim policy is deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I
- PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
- // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
- // The volume plugin must support Deletion.
- PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
- // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
- // The default policy is Retain.
- PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
-)
-
-// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
-type PersistentVolumeMode string
-
-const (
- // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
- PersistentVolumeBlock PersistentVolumeMode = "Block"
- // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
- PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
-)
-
-// PersistentVolumeStatus represents the status of PV storage
-type PersistentVolumeStatus struct {
- // Phase indicates if a volume is available, bound to a claim, or released by a claim
- // +optional
- Phase PersistentVolumePhase
- // A human-readable message indicating details about why the volume is in this state.
- // +optional
- Message string
- // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI
- // +optional
- Reason string
- // LastPhaseTransitionTime is the time the phase transitioned from one to another
- // and automatically resets to the current time every time a volume phase transitions.
- // This is an alpha field and requires enabling the PersistentVolumeLastPhaseTransitionTime feature.
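
// A minimal sketch (not part of the vendored file) of a PersistentVolume built
// from the spec fields above. Assumes imports core ("k8s.io/kubernetes/pkg/apis/core"),
// metav1 ("k8s.io/apimachinery/pkg/apis/meta/v1") and resource
// ("k8s.io/apimachinery/pkg/api/resource"); the server, path and name are illustrative.
//
//	fsMode := core.PersistentVolumeFilesystem
//	pv := core.PersistentVolume{
//		ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
//		Spec: core.PersistentVolumeSpec{
//			Capacity: core.ResourceList{
//				core.ResourceStorage: resource.MustParse("10Gi"),
//			},
//			// One member of the PersistentVolumeSource union is set.
//			PersistentVolumeSource: core.PersistentVolumeSource{
//				NFS: &core.NFSVolumeSource{Server: "nfs.example.com", Path: "/exports/data"},
//			},
//			AccessModes:                   []core.PersistentVolumeAccessMode{core.ReadWriteMany},
//			PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRetain,
//			VolumeMode:                    &fsMode, // pointer field
//		},
//	}
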
- // +featureGate=PersistentVolumeLastPhaseTransitionTime
- // +optional
- LastPhaseTransitionTime *metav1.Time
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PersistentVolumeList represents a list of PVs
-type PersistentVolumeList struct {
- metav1.TypeMeta
- // +optional
- metav1.ListMeta
- Items []PersistentVolume
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PersistentVolumeClaim is a user's request for and claim to a persistent volume
-type PersistentVolumeClaim struct {
- metav1.TypeMeta
- // +optional
- metav1.ObjectMeta
-
- // Spec defines the volume requested by a pod author
- // +optional
- Spec PersistentVolumeClaimSpec
-
- // Status represents the current information about a claim
- // +optional
- Status PersistentVolumeClaimStatus
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PersistentVolumeClaimList represents the list of PV claims
-type PersistentVolumeClaimList struct {
- metav1.TypeMeta
- // +optional
- metav1.ListMeta
- Items []PersistentVolumeClaim
-}
-
-// PersistentVolumeClaimSpec describes the common attributes of storage devices
-// and allows a Source for provider-specific attributes
-type PersistentVolumeClaimSpec struct {
- // Contains the types of access modes required
- // +optional
- AccessModes []PersistentVolumeAccessMode
- // A label query over volumes to consider for binding. This selector is
- // ignored when VolumeName is set
- // +optional
- Selector *metav1.LabelSelector
- // Resources represents the minimum resources required.
- // If the RecoverVolumeExpansionFailure feature is enabled, users are allowed to specify resource requirements
- // that are lower than the previous value but must still be higher than the capacity recorded in the
- // status field of the claim.
- // +optional
- Resources ResourceRequirements
- // VolumeName is the binding reference to the PersistentVolume backing this
- // claim. When set to a non-empty value, Selector is not evaluated.
- // +optional
- VolumeName string
- // Name of the StorageClass required by the claim.
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
- // +optional
- StorageClassName *string
- // volumeMode defines what type of volume is required by the claim.
- // Value of Filesystem is implied when not included in claim spec.
- // +optional
- VolumeMode *PersistentVolumeMode
- // This field can be used to specify either:
- // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
- // * An existing PVC (PersistentVolumeClaim)
- // If the provisioner or an external controller can support the specified data source,
- // it will create a new volume based on the contents of the specified data source.
- // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
- // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
- // If the namespace is specified, then dataSourceRef will not be copied to dataSource.
- // +optional
- DataSource *TypedLocalObjectReference
- // Specifies the object from which to populate the volume with data, if a non-empty
- // volume is desired. This may be any object from a non-empty API group (non
- // core object) or a PersistentVolumeClaim object.
- // When this field is specified, volume binding will only succeed if the type of
- // the specified object matches some installed volume populator or dynamic
- // provisioner.
- // This field will replace the functionality of the dataSource field and as such
- // if both fields are non-empty, they must have the same value. For backwards
- // compatibility, when namespace isn't specified in dataSourceRef,
- // both fields (dataSource and dataSourceRef) will be set to the same
- // value automatically if one of them is empty and the other is non-empty.
- // When namespace is specified in dataSourceRef,
- // dataSource isn't set to the same value and must be empty.
- // There are three important differences between dataSource and dataSourceRef:
- // * While dataSource only allows two specific types of objects, dataSourceRef
- // allows any non-core object, as well as PersistentVolumeClaim objects.
- // * While dataSource ignores disallowed values (dropping them), dataSourceRef
- // preserves all values, and generates an error if a disallowed value is
- // specified.
- // * While dataSource only allows local objects, dataSourceRef allows objects
- // in any namespace.
- // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
- // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
- // +optional
- DataSourceRef *TypedObjectReference
-}
-
-type TypedObjectReference struct {
- // APIGroup is the group for the resource being referenced.
- // If APIGroup is not specified, the specified Kind must be in the core API group.
- // For any other third-party types, APIGroup is required.
- // +optional
- APIGroup *string
- // Kind is the type of resource being referenced
- Kind string
- // Name is the name of resource being referenced
- Name string
- // Namespace is the namespace of resource being referenced
- // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
- // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
- // +featureGate=CrossNamespaceVolumeDataSource
- // +optional
- Namespace *string
-}
-
-// PersistentVolumeClaimConditionType defines the condition of PV claim.
-// Valid values are either "Resizing" or "FileSystemResizePending".
-type PersistentVolumeClaimConditionType string
-
-// These are valid conditions of a PVC
-const (
- // A user-triggered resize of the PVC has been started
- PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
- // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on the node
- PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
-)
-
-// +enum
-// When a controller receives a PersistentVolumeClaim update with a ClaimResourceStatus for a resource
-// that it does not recognize, it should ignore that update and let other controllers
-// handle it.
-type ClaimResourceStatus string
-
-const (
- // State set when the resize controller starts resizing the volume in the control plane
- PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
-
- // State set when resize has failed in the resize controller with a terminal error.
- // Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
- // unmodified, so that the resize controller can resume the volume expansion.
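
// A minimal sketch (not part of the vendored file) of a claim built from
// PersistentVolumeClaimSpec above, with the same core/metav1/resource import
// aliases assumed in the earlier sketches; the storage class name is illustrative.
//
//	className := "standard"
//	pvc := core.PersistentVolumeClaim{
//		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
//		Spec: core.PersistentVolumeClaimSpec{
//			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
//			Resources: core.ResourceRequirements{
//				Requests: core.ResourceList{
//					core.ResourceStorage: resource.MustParse("1Gi"),
//				},
//			},
//			StorageClassName: &className, // *string, so take an address
//		},
//	}
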
- PersistentVolumeClaimControllerResizeFailed ClaimResourceStatus = "ControllerResizeFailed"
-
- // State set when the resize controller has finished resizing the volume but further resizing of the volume
- // is needed on the node.
- PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
- // State set when the kubelet starts resizing the volume.
- PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
- // State set when resizing has failed in the kubelet with a terminal error. Transient errors don't set NodeResizeFailed.
- PersistentVolumeClaimNodeResizeFailed ClaimResourceStatus = "NodeResizeFailed"
-)
-
-// PersistentVolumeClaimCondition represents the current condition of PV claim
-type PersistentVolumeClaimCondition struct {
- Type PersistentVolumeClaimConditionType
- Status ConditionStatus
- // +optional
- LastProbeTime metav1.Time
- // +optional
- LastTransitionTime metav1.Time
- // +optional
- Reason string
- // +optional
- Message string
-}
-
-// PersistentVolumeClaimStatus represents the status of PV claim
-type PersistentVolumeClaimStatus struct {
- // Phase represents the current phase of PersistentVolumeClaim
- // +optional
- Phase PersistentVolumeClaimPhase
- // AccessModes contains all ways the volume backing the PVC can be mounted
- // +optional
- AccessModes []PersistentVolumeAccessMode
- // Represents the actual resources of the underlying volume
- // +optional
- Capacity ResourceList
- // +optional
- Conditions []PersistentVolumeClaimCondition
- // AllocatedResources tracks the resources allocated to a PVC including its capacity.
- // Key names follow standard Kubernetes label syntax. Valid values are either:
- // * Un-prefixed keys:
- // - storage - the capacity of the volume.
- // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
- // Apart from the above values, keys that are unprefixed or have the kubernetes.io prefix are considered
- // reserved and hence may not be used.
- //
- // Capacity reported here may be larger than the actual capacity when a volume expansion operation
- // is requested.
- // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
- // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
- // If a volume expansion capacity request is lowered, allocatedResources is only
- // lowered if there are no expansion operations in progress and if the actual volume capacity
- // is equal to or lower than the requested capacity.
- //
- // A controller that receives a PVC update with a previously unknown resourceName
- // should ignore the update for the purpose it was designed for. For example, a controller that
- // is only responsible for resizing volume capacity should ignore PVC updates that change other valid
- // resources associated with the PVC.
- //
- // This is an alpha field and requires enabling the RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
- // +optional
- AllocatedResources ResourceList
- // AllocatedResourceStatuses stores the status of the resource being resized for the given PVC.
- // Key names follow standard Kubernetes label syntax. Valid values are either:
- // * Un-prefixed keys:
- // - storage - the capacity of the volume.
- // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
- // Apart from the above values, keys that are unprefixed or have the kubernetes.io prefix are considered
- // reserved and hence may not be used.
- //
- // ClaimResourceStatus can be in any of the following states:
- // - ControllerResizeInProgress:
- // State set when the resize controller starts resizing the volume in the control plane.
- // - ControllerResizeFailed:
- // State set when resize has failed in the resize controller with a terminal error.
- // - NodeResizePending:
- // State set when the resize controller has finished resizing the volume but further resizing of
- // the volume is needed on the node.
- // - NodeResizeInProgress:
- // State set when the kubelet starts resizing the volume.
- // - NodeResizeFailed:
- // State set when resizing has failed in the kubelet with a terminal error. Transient errors don't set
- // NodeResizeFailed.
- // For example, when expanding a PVC for more capacity, this field can be in one of the following states:
- // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
- // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
- // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
- // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
- // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
- // When this field is not set, it means that no resize operation is in progress for the given PVC.
- //
- // A controller that receives a PVC update with a previously unknown resourceName or ClaimResourceStatus
- // should ignore the update for the purpose it was designed for. For example, a controller that
- // is only responsible for resizing volume capacity should ignore PVC updates that change other valid
- // resources associated with the PVC.
- //
- // This is an alpha field and requires enabling the RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
- // +mapType=granular
- // +optional
- AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus
-}
-
-// PersistentVolumeAccessMode defines the various access modes for a PV.
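
// A minimal sketch (not part of the vendored file) of consuming the
// AllocatedResourceStatuses map above to detect an unfinished expansion;
// pvc is assumed to be a populated core.PersistentVolumeClaim.
//
//	if st, ok := pvc.Status.AllocatedResourceStatuses[core.ResourceStorage]; ok {
//		switch st {
//		case core.PersistentVolumeClaimControllerResizeInProgress,
//			core.PersistentVolumeClaimNodeResizePending,
//			core.PersistentVolumeClaimNodeResizeInProgress:
//			// an expansion is still being worked on
//		case core.PersistentVolumeClaimControllerResizeFailed,
//			core.PersistentVolumeClaimNodeResizeFailed:
//			// terminal failure; may need manual recovery
//		}
//	}
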
-type PersistentVolumeAccessMode string
-
-// These are the valid values for PersistentVolumeAccessMode
-const (
- // can be mounted in read/write mode by exactly 1 host
- ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
- // can be mounted in read-only mode by many hosts
- ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
- // can be mounted in read/write mode by many hosts
- ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
- // can be mounted in read/write mode by exactly 1 pod;
- // cannot be used in combination with other access modes
- ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
-)
-
-// PersistentVolumePhase defines the phase a PV is in
-type PersistentVolumePhase string
-
-// These are the valid values for PersistentVolumePhase
-const (
- // used for PersistentVolumes that are not available
- VolumePending PersistentVolumePhase = "Pending"
- // used for PersistentVolumes that are not yet bound
- // Available volumes are held by the binder and matched to PersistentVolumeClaims
- VolumeAvailable PersistentVolumePhase = "Available"
- // used for PersistentVolumes that are bound
- VolumeBound PersistentVolumePhase = "Bound"
- // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
- // released volumes must be recycled before becoming available again
- // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
- VolumeReleased PersistentVolumePhase = "Released"
- // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
- VolumeFailed PersistentVolumePhase = "Failed"
-)
-
-// PersistentVolumeClaimPhase defines the phase of a PV claim
-type PersistentVolumeClaimPhase string
-
-// These are the valid values for PersistentVolumeClaimPhase
-const (
- // used for PersistentVolumeClaims that are not yet bound
- ClaimPending PersistentVolumeClaimPhase = "Pending"
- // used for PersistentVolumeClaims that are bound
- ClaimBound PersistentVolumeClaimPhase = "Bound"
- // used for PersistentVolumeClaims that lost their underlying
- // PersistentVolume. The claim was bound to a PersistentVolume and this
- // volume does not exist any longer and all data on it was lost.
- ClaimLost PersistentVolumeClaimPhase = "Lost"
-)
-
-// HostPathType defines the type of host path for a PV
-type HostPathType string
-
-// These are the valid values for HostPathType
-const (
- // For backwards compatibility, leave it empty if unset
- HostPathUnset HostPathType = ""
- // If nothing exists at the given path, an empty directory will be created there
- // as needed with file mode 0755, with the same group and ownership as the kubelet.
- HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
- // A directory must exist at the given path
- HostPathDirectory HostPathType = "Directory"
- // If nothing exists at the given path, an empty file will be created there
- // as needed with file mode 0644, with the same group and ownership as the kubelet.
- HostPathFileOrCreate HostPathType = "FileOrCreate"
- // A file must exist at the given path
- HostPathFile HostPathType = "File"
- // A UNIX socket must exist at the given path
- HostPathSocket HostPathType = "Socket"
- // A character device must exist at the given path
- HostPathCharDev HostPathType = "CharDevice"
- // A block device must exist at the given path
- HostPathBlockDev HostPathType = "BlockDevice"
-)
-
-// HostPathVolumeSource represents a host path mapped into a pod.
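
// A minimal sketch (not part of the vendored file) of a HostPath source using
// the HostPathType constants above; the path is illustrative. Type is a
// pointer field, so a local variable is needed to take its address.
//
//	hpType := core.HostPathDirectoryOrCreate
//	src := core.VolumeSource{
//		HostPath: &core.HostPathVolumeSource{
//			Path: "/var/lib/example-agent",
//			Type: &hpType,
//		},
//	}
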
-// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // If the path is a symlink, it will follow the link to the real path. - Path string - // Defaults to "" - Type *HostPathType -} - -// EmptyDirVolumeSource represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // +optional - Medium StorageMedium - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - // +optional - SizeLimit *resource.Quantity -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -// These are the valid value for StorageMedium -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages - StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages- -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" - // ProtocolSCTP is the SCTP protocol. - ProtocolSCTP Protocol = "SCTP" -) - -// GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// ISCSIVolumeSource represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. 
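
// A minimal sketch (not part of the vendored file) of a memory-backed
// EmptyDir using the StorageMedium constants above; resource is
// "k8s.io/apimachinery/pkg/api/resource" and the limit is illustrative.
//
//	limit := resource.MustParse("256Mi")
//	ed := core.EmptyDirVolumeSource{
//		Medium:    core.StorageMediumMemory, // tmpfs; usage counts against container memory limits
//		SizeLimit: &limit,
//	}
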
-type ISCSIVolumeSource struct {
- // Required: iSCSI target portal
- // the portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)
- // +optional
- TargetPortal string
- // Required: target iSCSI Qualified Name
- // +optional
- IQN string
- // Required: iSCSI target lun number
- // +optional
- Lun int32
- // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
- // +optional
- ISCSIInterface string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- // TODO: how do we prevent errors in the filesystem from compromising the machine
- // +optional
- FSType string
- // Optional: Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
- // Optional: list of iSCSI target portal IPs for high availability.
- // the portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)
- // +optional
- Portals []string
- // Optional: whether to support iSCSI Discovery CHAP authentication
- // +optional
- DiscoveryCHAPAuth bool
- // Optional: whether to support iSCSI Session CHAP authentication
- // +optional
- SessionCHAPAuth bool
- // Optional: CHAP secret for iSCSI target and initiator authentication.
- // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
- // +optional
- SecretRef *LocalObjectReference
- // Optional: Custom initiator name per volume.
- // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
- // <target portal>:<volume name> will be created for the connection.
- // +optional
- InitiatorName *string
-}
-
-// ISCSIPersistentVolumeSource represents an ISCSI disk.
-// ISCSI volumes can only be mounted as read/write once.
-// ISCSI volumes support ownership management and SELinux relabeling.
-type ISCSIPersistentVolumeSource struct {
- // Required: iSCSI target portal
- // the portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)
- // +optional
- TargetPortal string
- // Required: target iSCSI Qualified Name
- // +optional
- IQN string
- // Required: iSCSI target lun number
- // +optional
- Lun int32
- // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
- // +optional
- ISCSIInterface string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- // TODO: how do we prevent errors in the filesystem from compromising the machine
- // +optional
- FSType string
- // Optional: Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
- // Optional: list of iSCSI target portal IPs for high availability.
- // the portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260)
- // +optional
- Portals []string
- // Optional: whether to support iSCSI Discovery CHAP authentication
- // +optional
- DiscoveryCHAPAuth bool
- // Optional: whether to support iSCSI Session CHAP authentication
- // +optional
- SessionCHAPAuth bool
- // Optional: CHAP secret for iSCSI target and initiator authentication.
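
// A minimal sketch (not part of the vendored file) of an ISCSIVolumeSource as
// defined above; the portal address and IQN are illustrative placeholders.
//
//	iscsi := core.ISCSIVolumeSource{
//		TargetPortal: "10.0.0.10:3260",
//		IQN:          "iqn.2001-04.com.example:storage.disk1",
//		Lun:          0,
//		FSType:       "ext4",
//		ReadOnly:     true,
//	}
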
- // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
- // +optional
- SecretRef *SecretReference
- // Optional: Custom initiator name per volume.
- // If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
- // <target portal>:<volume name> will be created for the connection.
- // +optional
- InitiatorName *string
-}
-
-// FCVolumeSource represents a Fibre Channel volume.
-// Fibre Channel volumes can only be mounted as read/write once.
-// Fibre Channel volumes support ownership management and SELinux relabeling.
-type FCVolumeSource struct {
- // Optional: FC target worldwide names (WWNs)
- // +optional
- TargetWWNs []string
- // Optional: FC target lun number
- // +optional
- Lun *int32
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- // TODO: how do we prevent errors in the filesystem from compromising the machine
- // +optional
- FSType string
- // Optional: Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
- // Optional: FC volume World Wide Identifiers (WWIDs)
- // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously.
- // +optional
- WWIDs []string
-}
-
-// FlexPersistentVolumeSource represents a generic persistent volume resource that is
-// provisioned/attached using an exec-based plugin.
-type FlexPersistentVolumeSource struct {
- // Driver is the name of the driver to use for this volume.
- Driver string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on the FlexVolume script.
- // +optional
- FSType string
- // Optional: SecretRef is a reference to the secret object containing
- // sensitive information to pass to the plugin scripts. This may be
- // empty if no secret object is specified. If the secret object
- // contains more than one secret, all secrets are passed to the plugin
- // scripts.
- // +optional
- SecretRef *SecretReference
- // Optional: Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
- // Optional: Extra driver options if any.
- // +optional
- Options map[string]string
-}
-
-// FlexVolumeSource represents a generic volume resource that is
-// provisioned/attached using an exec-based plugin.
-type FlexVolumeSource struct {
- // Driver is the name of the driver to use for this volume.
- Driver string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on the FlexVolume script.
- // +optional
- FSType string
- // Optional: SecretRef is a reference to the secret object containing
- // sensitive information to pass to the plugin scripts. This may be
- // empty if no secret object is specified. If the secret object
- // contains more than one secret, all secrets are passed to the plugin
- // scripts.
- // +optional
- SecretRef *LocalObjectReference
- // Optional: Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
- // Optional: Extra driver options if any.
- // +optional
- Options map[string]string
-}
-
-// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
-// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// GitRepoVolumeSource represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -// -// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an -// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir -// into the Pod's container. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// SecretVolumeSource adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// SecretProjection adapts a secret into a projected volume. 
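
// A minimal sketch (not part of the vendored file) of a SecretVolumeSource as
// defined above; the secret name and paths are illustrative, and KeyToPath is
// defined elsewhere in this file.
//
//	mode := int32(0400)
//	sv := core.SecretVolumeSource{
//		SecretName: "app-credentials",
//		Items: []core.KeyToPath{
//			{Key: "token", Path: "auth/token"}, // project only this key
//		},
//		DefaultMode: &mode, // pointer field
//	}
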
-// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string - - // Tenant owning the given Quobyte volume in the Backend - // Used with dynamically provisioned Quobyte volumes, value is set by the plugin - // +optional - Tenant string -} - -// GlusterfsVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// GlusterfsPersistentVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsPersistentVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - EndpointsName string - - // Path is the Glusterfs volume path. 
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - Path string - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - ReadOnly bool - - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. - // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - EndpointsNamespace *string -} - -// RBDVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// RBDPersistentVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDPersistentVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume. 
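
// A minimal sketch (not part of the vendored file) of an RBDVolumeSource as
// defined above; the monitor addresses, image name and secret name are
// illustrative placeholders.
//
//	rbd := core.RBDVolumeSource{
//		CephMonitors: []string{"10.0.0.1:6789", "10.0.0.2:6789"},
//		RBDImage:     "disk-a",
//		RBDPool:      "rbd",
//		RadosUser:    "admin",
//		SecretRef:    &core.LocalObjectReference{Name: "ceph-secret"},
//		FSType:       "ext4",
//	}
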
- VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *LocalObjectReference -} - -// CinderPersistentVolumeSource represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderPersistentVolumeSource struct { - // Unique id of the volume used to identify the cinder volume. - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *SecretReference -} - -// CephFSVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string -} - -// CephFSPersistentVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). 
ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
-}
-
-// FlockerVolumeSource represents a Flocker volume mounted by the Flocker agent.
-// One and only one of datasetName and datasetUUID should be set.
-// Flocker volumes do not support ownership management or SELinux relabeling.
-type FlockerVolumeSource struct {
- // Name of the dataset stored as metadata -> name on the dataset for Flocker.
- // This field should be considered deprecated.
- // +optional
- DatasetName string
- // UUID of the dataset. This is the unique identifier of a Flocker dataset
- // +optional
- DatasetUUID string
-}
-
-// DownwardAPIVolumeSource represents a volume containing downward API info.
-// Downward API volumes support ownership management and SELinux relabeling.
-type DownwardAPIVolumeSource struct {
- // Items is a list of downward API volume files
- // +optional
- Items []DownwardAPIVolumeFile
- // Mode bits to use on created files by default. Must be a value between
- // 0 and 0777.
- // Directories within the path are not affected by this setting.
- // This might be in conflict with other options that affect the file
- // mode, like fsGroup, and the result can be other mode bits set.
- // +optional
- DefaultMode *int32
-}
-
-// DownwardAPIVolumeFile represents a single file containing information from the downward API
-type DownwardAPIVolumeFile struct {
- // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
- Path string
- // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
- // +optional
- FieldRef *ObjectFieldSelector
- // Selects a resource of the container: only resources limits and requests
- // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
- // +optional
- ResourceFieldRef *ResourceFieldSelector
- // Optional: mode bits to use on this file, must be a value between 0
- // and 0777. If not specified, the volume defaultMode will be used.
- // This might be in conflict with other options that affect the file
- // mode, like fsGroup, and the result can be other mode bits set.
- // +optional
- Mode *int32
-}
-
-// DownwardAPIProjection represents downward API info for projecting into a projected volume.
-// Note that this is identical to a downwardAPI volume source without the default
-// mode.
-type DownwardAPIProjection struct {
- // Items is a list of downward API volume files
- // +optional
- Items []DownwardAPIVolumeFile
-}
-
-// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
-type AzureFileVolumeSource struct {
- // the name of the secret that contains Azure Storage Account Name and Key
- SecretName string
- // Share Name
- ShareName string
- // Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
-}
-
-// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
-type AzureFilePersistentVolumeSource struct {
- // the name of the secret that contains Azure Storage Account Name and Key
- SecretName string
- // Share Name
- ShareName string
- // Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
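
// A minimal sketch (not part of the vendored file) of a DownwardAPIVolumeSource
// as defined above, projecting the pod's labels into a file; the path is
// illustrative and ObjectFieldSelector is defined elsewhere in this file.
//
//	mode := int32(0644)
//	dapi := core.DownwardAPIVolumeSource{
//		Items: []core.DownwardAPIVolumeFile{{
//			Path:     "labels",
//			FieldRef: &core.ObjectFieldSelector{FieldPath: "metadata.labels"},
//			Mode:     &mode,
//		}},
//	}
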
- // +optional
- ReadOnly bool
- // the namespace of the secret that contains Azure Storage Account Name and Key
- // defaults to the same namespace as the Pod
- // +optional
- SecretNamespace *string
-}
-
-// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
-type VsphereVirtualDiskVolumeSource struct {
- // Path that identifies the vSphere volume vmdk
- VolumePath string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- // +optional
- FSType string
- // Storage Policy Based Management (SPBM) profile name.
- // +optional
- StoragePolicyName string
- // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
- // +optional
- StoragePolicyID string
-}
-
-// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
-type PhotonPersistentDiskVolumeSource struct {
- // ID that identifies the Photon Controller persistent disk
- PdID string
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- FSType string
-}
-
-// PortworxVolumeSource represents a Portworx volume resource.
-type PortworxVolumeSource struct {
- // VolumeID uniquely identifies a Portworx volume
- VolumeID string
- // FSType represents the filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
- // +optional
- FSType string
- // Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly bool
-}
-
-// AzureDataDiskCachingMode defines the caching mode for an Azure data disk
-type AzureDataDiskCachingMode string
-
-// AzureDataDiskKind defines the kind of an Azure data disk
-type AzureDataDiskKind string
-
-// Defines the caching modes and kinds for Azure data disks
-const (
- AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
- AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
- AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
-
- AzureSharedBlobDisk AzureDataDiskKind = "Shared"
- AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
- AzureManagedDisk AzureDataDiskKind = "Managed"
-)
-
-// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
-type AzureDiskVolumeSource struct {
- // The name of the data disk in the blob storage
- DiskName string
- // The URI of the data disk in the blob storage
- DataDiskURI string
- // Host caching mode: None, Read Only, Read Write.
- // +optional
- CachingMode *AzureDataDiskCachingMode
- // Filesystem type to mount.
- // Must be a filesystem type supported by the host operating system.
- // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
- // +optional
- FSType *string
- // Defaults to false (read/write). ReadOnly here will force
- // the ReadOnly setting in VolumeMounts.
- // +optional
- ReadOnly *bool
- // Expected values: Shared (multiple blob disks per storage account), Dedicated (single blob disk
- // per storage account), Managed (Azure managed data disk; only in a managed availability set).
- // Defaults to Shared.
- Kind *AzureDataDiskKind
-}
-
-// ScaleIOVolumeSource represents a persistent ScaleIO volume
-type ScaleIOVolumeSource struct {
- // The host address of the ScaleIO API Gateway.
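
// A minimal sketch (not part of the vendored file) of an AzureDiskVolumeSource
// as defined above. Several fields are pointers, so locals are used to take
// addresses; the disk name and URI are illustrative placeholders.
//
//	caching := core.AzureDataDiskCachingReadOnly
//	kind := core.AzureManagedDisk
//	fsType := "ext4"
//	readOnly := true
//	azd := core.AzureDiskVolumeSource{
//		DiskName:    "data-disk-0",
//		DataDiskURI: "https://example.blob.core.windows.net/vhds/data-disk-0.vhd",
//		CachingMode: &caching,
//		FSType:      &fsType,
//		ReadOnly:    &readOnly,
//		Kind:        &kind,
//	}
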
Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references the secret for ScaleIO user and other - // sensitive information. If this is not provided, the Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined -// by an admin via a storage class, for instance. -type ScaleIOPersistentVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references the secret for ScaleIO user and other - // sensitive information. If this is not provided, the Login operation will fail. - SecretRef *SecretReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string - // The name of a volume created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// StorageOSVolumeSource represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex.
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference -} - -// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference -} - -// ConfigMapVolumeSource adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or its keys must be defined - // +optional - Optional *bool -} - -// ConfigMapProjection adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. 
-// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or its keys must be defined - // +optional - Optional *bool -} - -// ServiceAccountTokenProjection represents a projected service account token -// volume. This projection can be used to insert a service account token into -// the pod's runtime filesystem for use against APIs (Kubernetes API Server or -// otherwise). -type ServiceAccountTokenProjection struct { - // Audience is the intended audience of the token. A recipient of a token - // must identify itself with an identifier specified in the audience of the - // token, and otherwise should reject the token. The audience defaults to the - // identifier of the apiserver. - Audience string - // ExpirationSeconds is the requested duration of validity of the service - // account token. As the token approaches expiration, the kubelet volume - // plugin will proactively rotate the service account token. The kubelet will - // start trying to rotate the token if the token is older than 80 percent of - // its time to live or if the token is older than 24 hours. Defaults to 1 hour - // and must be at least 10 minutes. - ExpirationSeconds int64 - // Path is the path relative to the mount point of the file to project the - // token into. - Path string -} - -// ProjectedVolumeSource represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// VolumeProjection is a single volume projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection - // information about the serviceAccountToken data to project - ServiceAccountToken *ServiceAccountTokenProjection -} - -// KeyToPath maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used.
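// Illustrative sketch, not part of this patch: combining the projection types
// above into a ProjectedVolumeSource with a bound service account token.
// Assumes the vendored k8s.io/kubernetes/pkg/apis/core package removed by this
// diff; audience and path values are hypothetical.
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	mode := int32(0440)
	vol := core.ProjectedVolumeSource{
		Sources: []core.VolumeProjection{{
			ServiceAccountToken: &core.ServiceAccountTokenProjection{
				Audience:          "https://example.com", // recipients must identify with this audience
				ExpirationSeconds: 3600,                  // defaults to 1 hour; must be at least 10 minutes
				Path:              "token",
			},
		}},
		DefaultMode: &mode,
	}
	fmt.Printf("%+v\n", vol)
}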
- // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// LocalVolumeSource represents directly-attached storage with node affinity (Beta feature) -type LocalVolumeSource struct { - // The full path to the volume on the node. - // It can be either a directory or block device (disk, partition, ...). - Path string - - // Filesystem type to mount. - // It applies only when the Path is a block device. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified. - // +optional - FSType *string -} - -// CSIPersistentVolumeSource represents storage that is managed by an external CSI volume driver. -type CSIPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. - // Required. - Driver string - - // VolumeHandle is the unique volume name returned by the CSI volume - // plugin’s CreateVolume to refer to the volume on all subsequent calls. - // Required. - VolumeHandle string - - // Optional: The value to pass to ControllerPublishVolumeRequest. - // Defaults to false (read/write). - // +optional - ReadOnly bool - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // +optional - FSType string - - // Attributes of the volume to publish. - // +optional - VolumeAttributes map[string]string - - // ControllerPublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - ControllerPublishSecretRef *SecretReference - - // NodeStageSecretRef is a reference to the secret object containing sensitive - // information to pass to the CSI driver to complete the CSI NodeStageVolume - // and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodeStageSecretRef *SecretReference - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodePublishSecretRef *SecretReference - - // ControllerExpandSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerExpandVolume call. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - ControllerExpandSecretRef *SecretReference - - // NodeExpandSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodeExpandVolume call. - // This is a beta field which is enabled by default by the CSINodeExpandSecret feature gate. - // This field is optional, and may be omitted if no secret is required.
If the - // secret object contains more than one secret, all secrets are passed. - // +featureGate=CSINodeExpandSecret - // +optional - NodeExpandSecretRef *SecretReference -} - -// CSIVolumeSource represents a source location of a volume to mount, managed by an external CSI driver -type CSIVolumeSource struct { - // Driver is the name of the CSI driver that handles this volume. - // Consult with your admin for the correct name as registered in the cluster. - // Required. - Driver string - - // Specifies a read-only configuration for the volume. - // Defaults to false (read/write). - // +optional - ReadOnly *bool - - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". - // If not provided, the empty value is passed to the associated CSI driver - // which will determine the default filesystem to apply. - // +optional - FSType *string - - // VolumeAttributes stores driver-specific properties that are passed to the CSI - // driver. Consult your driver's documentation for supported values. - // +optional - VolumeAttributes map[string]string - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secret references are passed. - // +optional - NodePublishSecretRef *LocalObjectReference -} - -// EphemeralVolumeSource represents an ephemeral volume that is handled by a normal storage driver. -type EphemeralVolumeSource struct { - // VolumeClaimTemplate will be used to create a stand-alone PVC to provision the volume. - // The pod in which this EphemeralVolumeSource is embedded will be the - // owner of the PVC, i.e. the PVC will be deleted together with the - // pod. The name of the PVC will be `<pod name>-<volume name>` where - // `<volume name>` is the name from the `PodSpec.Volumes` array - // entry. Pod validation will reject the pod if the concatenated name - // is not valid for a PVC (for example, too long). - // - // An existing PVC with that name that is not owned by the pod - // will *not* be used for the pod to avoid using an unrelated - // volume by mistake. Starting the pod is then blocked until - // the unrelated PVC is removed. If such a pre-created PVC is - // meant to be used by the pod, the PVC has to be updated with an - // owner reference to the pod once the pod exists. Normally - // this should not be necessary, but it may be useful when - // manually reconstructing a broken cluster. - // - // This field is read-only and no changes will be made by Kubernetes - // to the PVC after it has been created. - // - // Required, must not be nil. - VolumeClaimTemplate *PersistentVolumeClaimTemplate -} - -// PersistentVolumeClaimTemplate is used to produce -// PersistentVolumeClaim objects as part of an EphemeralVolumeSource. -type PersistentVolumeClaimTemplate struct { - // ObjectMeta may contain labels and annotations that will be copied into the PVC - // when creating it. No other fields are allowed and will be rejected during - // validation. - // +optional - metav1.ObjectMeta - - // Spec for the PersistentVolumeClaim. The entire content is - // copied unchanged into the PVC that gets created from this - // template. The same fields as in a PersistentVolumeClaim - // are also valid here.
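// Illustrative sketch, not part of this patch: an inline CSIVolumeSource of
// the shape defined above. The driver name and attributes are hypothetical;
// real values come from the CSI drivers registered in the cluster. Assumes
// the vendored k8s.io/kubernetes/pkg/apis/core package removed by this diff.
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	fsType := "ext4" // an empty value lets the driver pick its default filesystem
	readOnly := false
	csi := core.CSIVolumeSource{
		Driver:           "csi.example.com", // hypothetical driver name
		ReadOnly:         &readOnly,
		FSType:           &fsType,
		VolumeAttributes: map[string]string{"tier": "standard"}, // driver-specific
	}
	fmt.Printf("%+v\n", csi)
}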
- Spec PersistentVolumeClaimSpec -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME. Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP", "UDP" and "SCTP". - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. If the path is not an absolute path (e.g. some/path) it - // will be prepended with the appropriate root prefix for the operating - // system. On Linux this is '/', on Windows this is 'C:\'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationNone is used. - // This field is beta in 1.10. - // +optional - MountPropagation *MountPropagationMode - // Expanded path within the volume from which the container's volume should be mounted. - // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - // Defaults to "" (volume's root). - // SubPathExpr and SubPath are mutually exclusive. - // +optional - SubPathExpr string -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationNone means that the volume in a container will - // not receive new mounts from the host or other containers, and filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode corresponds to "private" in Linux terminology. - MountPropagationNone MountPropagationMode = "None" - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). - MountPropagationHostToContainer MountPropagationMode = "HostToContainer" - // MountPropagationBidirectional means that the volume in a container will - // receive new mounts from the host or other containers, and its own mounts - // will be propagated from the container to the host or other containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rshared" in Linux terminology). - MountPropagationBidirectional MountPropagationMode = "Bidirectional" -) - -// VolumeDevice describes a mapping of a raw block device within a container. -type VolumeDevice struct { - // name must match the name of a persistentVolumeClaim in the pod - Name string - // devicePath is the path inside of the container that the device will be mapped to.
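// Illustrative sketch, not part of this patch: a VolumeMount using the
// HostToContainer propagation mode defined above. The volume name and paths
// are hypothetical; assumes the vendored k8s.io/kubernetes/pkg/apis/core
// package removed by this diff.
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	prop := core.MountPropagationHostToContainer // receive new host mounts ("rslave")
	mount := core.VolumeMount{
		Name:             "data",          // must match a Volume name in the pod
		MountPath:        "/var/lib/data", // absolute path inside the container
		SubPathExpr:      "$(POD_NAME)",   // expanded from the env; mutually exclusive with SubPath
		MountPropagation: &prop,
	}
	fmt.Printf("%+v\n", mount)
}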
- DevicePath string -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previously defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. Double $$ are - // reduced to a single $, which allows for escaping the $(VAR_NAME) - // syntax: i.e. "$$(VAR_NAME)" will produce the string literal - // "$(VAR_NAME)". Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Value string - // Optional: Specifies a source the value of this var should come from. - // +optional - ValueFrom *EnvVarSource -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, - // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector - // Selects a key of a secret in the pod's namespace. - // +optional - SecretKeyRef *SecretKeySelector -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. - APIVersion string - // Required: Path of the field to select in the specified API version - FieldPath string -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string - // Required: resource to select - Resource string - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity -} - -// ConfigMapKeySelector selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference - // The key to select. - Key string - // Specify whether the ConfigMap or its key must be defined - // +optional - Optional *bool -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference - // The key of the secret to select from. Must be a valid secret key. - Key string - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps or Secrets -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. - // +optional - Prefix string - // The ConfigMap to select from.
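// Illustrative sketch, not part of this patch: the two EnvVarSource forms
// described above, one reading a pod field and one reading a container
// resource with a Divisor. Assumes the vendored k8s.io/kubernetes/pkg/apis/core
// package removed by this diff plus k8s.io/apimachinery for resource.Quantity.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	env := []core.EnvVar{
		{Name: "POD_NAME", ValueFrom: &core.EnvVarSource{
			FieldRef: &core.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
		}},
		{Name: "MEM_LIMIT_MI", ValueFrom: &core.EnvVarSource{
			ResourceFieldRef: &core.ResourceFieldSelector{
				Resource: "limits.memory",
				Divisor:  resource.MustParse("1Mi"), // expose the limit in mebibytes
			},
		}},
	}
	fmt.Printf("%+v\n", env)
}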
- // +optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - // +optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name. - // This will be canonicalized upon output, so case-variant names will be understood as the same header. - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. - // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - Host string -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - ProbeHandler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. 
- // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness and startup probes. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 - // Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - // The grace period is the duration in seconds between the time the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - // value overrides the value provided by the pod spec. - // Value must be a non-negative integer. The value zero indicates stop immediately via - // the kill signal (no opportunity to shut down). - // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - // +optional - TerminationGracePeriodSeconds *int64 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. The container will fail if the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. The container will fail if the image isn't present. - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. The container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// ResourceResizeRestartPolicy specifies how to handle container resource resize. -type ResourceResizeRestartPolicy string - -// These are the valid resource resize restart policy values: -const ( - // 'NotRequired' means Kubernetes will try to resize the container - // without restarting it, if possible. Kubernetes may however choose to - // restart the container if it is unable to actuate resize without a - // restart, for example when the runtime doesn't support restart-free resizing. - NotRequired ResourceResizeRestartPolicy = "NotRequired" - // 'RestartContainer' means Kubernetes will resize the container in-place - // by stopping and starting the container when new resources are applied. - // This is needed for legacy applications, for example Java apps using the - // -XmxN flag, which are unable to use resized memory without restarting. - RestartContainer ResourceResizeRestartPolicy = "RestartContainer" -) - -// ContainerResizePolicy represents resource resize policy for the container. -type ContainerResizePolicy struct { - // Name of the resource to which this resource resize policy applies. - // Supported values: cpu, memory. - ResourceName ResourceName - // Restart policy to apply when specified resource is resized. - // If not specified, it defaults to NotRequired. - RestartPolicy ResourceResizeRestartPolicy -} - -// PreemptionPolicy describes a policy for if/when to preempt a pod. -type PreemptionPolicy string - -const ( - // PreemptLowerPriority means that a pod can preempt other pods with lower priority. - PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" - // PreemptNever means that a pod never preempts other pods with lower priority.
- PreemptNever PreemptionPolicy = "Never" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represents a POSIX capability type -type Capability string - -// Capabilities represent POSIX capabilities that can be added to or removed from a running container. -type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // +optional - Requests ResourceList - // Claims lists the names of resources, defined in spec.resourceClaims, - // that are used by this container. - // - // This is an alpha field and requires enabling the - // DynamicResourceAllocation feature gate. - // - // This field is immutable. It can only be set for containers. - // - // +featureGate=DynamicResourceAllocation - // +optional - Claims []ResourceClaim -} - -// ResourceClaim references one entry in PodSpec.ResourceClaims. -type ResourceClaim struct { - // Name must match the name of one entry in pod.spec.resourceClaims of - // the Pod where this field is used. It makes that resource available - // inside a container. - Name string -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The container image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". - // Optional: The container image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)".
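// Illustrative sketch, not part of this patch: a ResourceRequirements value of
// the shape defined above, with Requests set explicitly rather than defaulted
// from Limits. Assumes the vendored k8s.io/kubernetes/pkg/apis/core package
// removed by this diff plus k8s.io/apimachinery for resource.MustParse.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	reqs := core.ResourceRequirements{
		Limits: core.ResourceList{
			core.ResourceCPU:    resource.MustParse("500m"),
			core.ResourceMemory: resource.MustParse("256Mi"),
		},
		Requests: core.ResourceList{ // must not exceed Limits
			core.ResourceCPU:    resource.MustParse("250m"),
			core.ResourceMemory: resource.MustParse("128Mi"),
		},
	}
	fmt.Printf("%+v\n", reqs)
}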
Escaped references will never be expanded, regardless - // of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to the container runtime's default working directory. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // Resources resize policy for the container. - // +featureGate=InPlacePodVerticalScaling - // +optional - ResizePolicy []ContainerResizePolicy - // RestartPolicy defines the restart behavior of individual containers in a pod. - // This field may only be set for init containers, and the only allowed value is "Always". - // For non-init containers or when this field is not specified, - // the restart behavior is defined by the Pod's restart policy and the container type. - // Setting the RestartPolicy as "Always" for the init container will have the following effect: - // this init container will be continually restarted on - // exit until all regular containers have terminated. Once all regular - // containers have completed, all init containers with restartPolicy "Always" - // will be shut down. This lifecycle differs from normal init containers and - // is often referred to as a "sidecar" container. Although this init - // container still starts in the init container sequence, it does not wait - // for the container to complete before proceeding to the next init - // container. Instead, the next init container starts immediately after this - // init container is started, or after any startupProbe has successfully - // completed. - // +featureGate=SidecarContainers - // +optional - RestartPolicy *ContainerRestartPolicy - // +optional - VolumeMounts []VolumeMount - // volumeDevices is the list of block devices to be used by the container. - // +optional - VolumeDevices []VolumeDevice - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - StartupProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// ProbeHandler defines a specific action that should be taken in a probe. -// One and only one of the fields must be specified. -type ProbeHandler struct { - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. 
- // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // +optional - TCPSocket *TCPSocketAction - - // GRPC specifies an action involving a GRPC port. - // +optional - GRPC *GRPCAction -} - -// LifecycleHandler defines a specific action that should be taken in a lifecycle -// hook. One and only one of the fields, except TCPSocket, must be specified. -type LifecycleHandler struct { - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept - // for backward compatibility. There is no validation of this field, and - // lifecycle hooks will fail at runtime when a TCP handler is specified. - // +optional - TCPSocket *TCPSocketAction -} - -type GRPCAction struct { - // Port number of the gRPC service. - // Note: Number must be in the range 1 to 65535. - Port int32 - - // Service is the name of the service to place in the gRPC HealthCheckRequest - // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - // - // If this is not specified, the default behavior is to probe the server's overall health status. - // +optional - Service *string -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - PostStart *LifecycleHandler - // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness/startup probe failure, - // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The Pod's termination grace period countdown begins before the - // PreStop hook is executed. Regardless of the outcome of the handler, the - // container will eventually terminate within the Pod's termination grace - // period (unless delayed by finalizers). Other management of the container blocks until the hook completes - // or until the termination grace period is reached. - // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - PreStop *LifecycleHandler -} - -// The below types are used by kube_client and api_server. - -// ConditionStatus defines conditions of resources -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// ContainerStateWaiting represents the waiting state of a container -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state.
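// Illustrative sketch, not part of this patch: a Probe built from the
// ProbeHandler/GRPCAction types above. The service name is hypothetical; a nil
// Service probes the server's overall health status. Assumes the vendored
// k8s.io/kubernetes/pkg/apis/core package removed by this diff.
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	svc := "readiness" // placed in the gRPC HealthCheckRequest
	probe := core.Probe{
		ProbeHandler: core.ProbeHandler{
			GRPC: &core.GRPCAction{Port: 9090, Service: &svc}, // port must be 1-65535
		},
		PeriodSeconds:    10,
		FailureThreshold: 3,
		SuccessThreshold: 1, // must be 1 for liveness and startup probes
	}
	fmt.Printf("%+v\n", probe)
}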
- // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -// ContainerStateRunning represents the running state of a container -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -// ContainerStateTerminated represents the terminated state of a container -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of a container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -// ContainerStatus contains details for the current status of this container. -type ContainerStatus struct { - // Name is a DNS_LABEL representing the unique name of the container. - // Each container in a pod must have a unique name across all container types. - // Cannot be updated. - Name string - // State holds details about the container's current condition. - // +optional - State ContainerState - // LastTerminationState holds the last termination state of the container to - // help debug container crashes and restarts. This field is not - // populated if the container is still running and RestartCount is 0. - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container is currently passing its readiness check. - // The value will change as readiness probes keep executing. If no readiness - // probes are specified, this field defaults to true once the container is - // fully started (see Started field). - // - // The value is typically used to determine whether a container is ready to - // accept traffic. - Ready bool - // RestartCount holds the number of times the container has been restarted. - // Kubelet makes an effort to always increment the value, but there - // are cases when the state may be lost due to node restarts and then the value - // may be reset to 0. The value is never negative. - RestartCount int32 - // Image is the name of the container image that the container is running. - // The container image may not match the image used in the PodSpec, - // as it may have been resolved by the runtime. - // More info: https://kubernetes.io/docs/concepts/containers/images. - Image string - // ImageID is the image ID of the container's image. The image ID may not - // match the image ID of the image used in the PodSpec, as it may have been - // resolved by the runtime. - ImageID string - // ContainerID is the ID of the container in the format '<type>://<container_id>'. - // Where type is a container runtime identifier, returned from the Version call of the CRI API - // (for example "containerd"). - // +optional - ContainerID string - // Started indicates whether the container has finished its postStart lifecycle hook - // and passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered - // successful. Resets to false when the container is restarted, or if kubelet - // loses state temporarily. In both cases, startup probes will run again.
- // Is always true when no startupProbe is defined and container is running and - // has passed the postStart lifecycle hook. The null value must be treated the - // same as false. - // +optional - Started *bool - // AllocatedResources represents the compute resources allocated for this container by the - // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission - // and after successfully admitting desired pod resize. - // +featureGate=InPlacePodVerticalScaling - // +optional - AllocatedResources ResourceList - // Resources represents the compute resource requests and limits that have been successfully - // enacted on the running container after it has been started or has been successfully resized. - // +featureGate=InPlacePodVerticalScaling - // +optional - Resources *ResourceRequirements -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - // Deprecated in v1.21: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095) - PodUnknown PodPhase = "Unknown" -) - -// PodConditionType defines the condition of pod -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" - // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler - // skips scheduling the pod because one or more scheduling gates are still present. - PodReasonSchedulingGated = "SchedulingGated" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" - // DisruptionTarget indicates the pod is about to be terminated due to a - // disruption (such as preemption, eviction API or garbage-collection). 
- DisruptionTarget PodConditionType = "DisruptionTarget" -) - -// PodCondition represents a pod's condition -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// PodResizeStatus shows status of desired resize of a pod's containers. -type PodResizeStatus string - -const ( - // Pod resources resize has been requested and will be evaluated by node. - PodResizeStatusProposed PodResizeStatus = "Proposed" - // Pod resources resize has been accepted by node and is being actuated. - PodResizeStatusInProgress PodResizeStatus = "InProgress" - // Node cannot resize the pod at this time and will keep retrying. - PodResizeStatusDeferred PodResizeStatus = "Deferred" - // Requested pod resize is not feasible and will not be re-evaluated. - PodResizeStatusInfeasible PodResizeStatus = "Infeasible" -) - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -// These are valid restart policies -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// ContainerRestartPolicy is the restart policy for a single container. -// This may only be set for init containers and the only allowed value is "Always". -type ContainerRestartPolicy string - -const ( - ContainerRestartPolicyAlways ContainerRestartPolicy = "Always" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" - - // DNSNone indicates that the pod should use empty DNS settings. DNS - // parameters such as nameservers and search paths should be defined via - // DNSConfig. - DNSNone DNSPolicy = "None" -) - -// NodeSelector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - // Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// NodeSelectorTerm represents expressions and fields required to select nodes. -// A null or empty node selector term matches no objects. The requirements of -// them are ANDed. -// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. -type NodeSelectorTerm struct { - // A list of node selector requirements by node's labels.
- MatchExpressions []NodeSelectorRequirement - // A list of node selector requirements by node's fields. - MatchFields []NodeSelectorRequirement -} - -// NodeSelectorRequirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// NodeSelectorOperator is the set of operators that can be used in -// a node selector requirement. -type NodeSelectorOperator string - -// These are valid values of NodeSelectorOperator -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// TopologySelectorTerm represents the result of label queries. -// A null or empty topology selector term matches no objects. -// The requirements of them are ANDed. -// It provides a subset of the functionality of NodeSelectorTerm. -// This is an alpha feature and may change in the future. -type TopologySelectorTerm struct { - // A list of topology selector requirements by labels. - // +optional - MatchLabelExpressions []TopologySelectorLabelRequirement -} - -// TopologySelectorLabelRequirement is a selector that matches a given label. -// This is an alpha feature and may change in the future. -type TopologySelectorLabelRequirement struct { - // The label key that the selector applies to. - Key string - // An array of string values. One value must match the label to be selected. - // Each entry in Values is ORed. - Values []string -} - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// PodAffinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied.
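// Illustrative sketch, not part of this patch: a NodeSelector whose terms are
// ORed while the requirements inside a term are ANDed, per the comments above.
// Label keys and values are hypothetical; assumes the vendored
// k8s.io/kubernetes/pkg/apis/core package removed by this diff.
package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	sel := core.NodeSelector{
		NodeSelectorTerms: []core.NodeSelectorTerm{{
			MatchExpressions: []core.NodeSelectorRequirement{
				{Key: "disktype", Operator: core.NodeSelectorOpIn, Values: []string{"ssd"}},
				// Gt/Lt take a single value that is interpreted as an integer.
				{Key: "cpu-count", Operator: core.NodeSelectorOpGt, Values: []string{"8"}},
			},
		}},
	}
	fmt.Printf("%+v\n", sel)
}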
- // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// PodAntiAffinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred.
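// Illustrative sketch, not part of this patch: a preferred (weighted)
// anti-affinity term of the shape described above, spreading pods carrying the
// hypothetical label app=web across hostnames. Assumes the vendored
// k8s.io/kubernetes/pkg/apis/core package removed by this diff plus
// k8s.io/apimachinery for metav1.LabelSelector.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	anti := core.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{{
			Weight: 100, // weights range 1-100 and are summed per node
			PodAffinityTerm: core.PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
				TopologyKey:   "kubernetes.io/hostname", // an empty topologyKey is not allowed
			},
		}},
	}
	fmt.Printf("%+v\n", anti)
}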
- // +optional
- PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
-}
-
-// WeightedPodAffinityTerm represents a pod affinity term with an associated weight.
-// The weights of all of the matched WeightedPodAffinityTerm fields are added
-// per-node to find the most preferred node(s).
-type WeightedPodAffinityTerm struct {
- // weight associated with matching the corresponding podAffinityTerm,
- // in the range 1-100.
- Weight int32
- // Required. A pod affinity term, associated with the corresponding weight.
- PodAffinityTerm PodAffinityTerm
-}
-
-// PodAffinityTerm defines a set of pods (namely those matching the labelSelector
-// relative to the given namespace(s)) that this pod should be
-// co-located (affinity) or not co-located (anti-affinity) with,
-// where co-located is defined as running on a node whose value of
-// the label with key <topologyKey> matches that of any node on which
-// a pod of the set of pods is running.
-type PodAffinityTerm struct {
- // A label query over a set of resources, in this case pods.
- // +optional
- LabelSelector *metav1.LabelSelector
- // namespaces specifies a static list of namespace names that the term applies to.
- // The term is applied to the union of the namespaces listed in this field
- // and the ones selected by namespaceSelector.
- // null or empty namespaces list and null namespaceSelector means "this pod's namespace".
- // +optional
- Namespaces []string
- // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
- // the labelSelector in the specified namespaces, where co-located is defined as running on a node
- // whose value of the label with key topologyKey matches that of any node on which any of the
- // selected pods is running.
- // Empty topologyKey is not allowed.
- TopologyKey string
- // A label query over the set of namespaces that the term applies to.
- // The term is applied to the union of the namespaces selected by this field
- // and the ones listed in the namespaces field.
- // null selector and null or empty namespaces list means "this pod's namespace".
- // An empty selector ({}) matches all namespaces.
- // +optional
- NamespaceSelector *metav1.LabelSelector
-}
-
-// NodeAffinity is a group of node affinity scheduling rules.
-type NodeAffinity struct {
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // If the affinity requirements specified by this field are not met at
- // scheduling time, the pod will not be scheduled onto the node.
- // If the affinity requirements specified by this field cease to be met
- // at some point during pod execution (e.g. due to an update), the system
- // will try to eventually evict the pod from its node.
- // +optional
- // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
-
- // If the affinity requirements specified by this field are not met at
- // scheduling time, the pod will not be scheduled onto the node.
- // If the affinity requirements specified by this field cease to be met
- // at some point during pod execution (e.g. due to an update), the system
- // may or may not try to eventually evict the pod from its node.
- // +optional
- RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector
- // The scheduler will prefer to schedule pods to nodes that satisfy
- // the affinity expressions specified by this field, but it may choose
- // a node that violates one or more of the expressions. The node that is
- // most preferred is the one with the greatest sum of weights, i.e.
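
// Illustrative sketch: a hard anti-affinity rule that keeps two pods labeled
// app=web off the same node, using the conventional kubernetes.io/hostname
// topology key (metav1 is k8s.io/apimachinery/pkg/apis/meta/v1; the app label
// is hypothetical).
antiAffinity := &corev1.PodAntiAffinity{
	RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		TopologyKey: "kubernetes.io/hostname", // an empty topologyKey is not allowed
	}},
}
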
- // for each node that meets all of the scheduling requirements (resource
- // request, requiredDuringScheduling affinity expressions, etc.),
- // compute a sum by iterating through the elements of this field and adding
- // "weight" to the sum if the node matches the corresponding matchExpressions; the
- // node(s) with the highest sum are the most preferred.
- // +optional
- PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm
-}
-
-// PreferredSchedulingTerm represents a node selector term with an associated weight.
-// An empty preferred scheduling term matches all objects with implicit weight 0
-// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
-type PreferredSchedulingTerm struct {
- // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
- Weight int32
- // A node selector term, associated with the corresponding weight.
- Preference NodeSelectorTerm
-}
-
-// Taint represents a taint that can be applied to a node.
-// The node this Taint is attached to has the "effect" on
-// any pod that does not tolerate the Taint.
-type Taint struct {
- // Required. The taint key to be applied to a node.
- Key string
- // Required. The taint value corresponding to the taint key.
- // +optional
- Value string
- // Required. The effect of the taint on pods
- // that do not tolerate the taint.
- // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
- Effect TaintEffect
- // TimeAdded represents the time at which the taint was added.
- // It is only written for NoExecute taints.
- // +optional
- TimeAdded *metav1.Time
-}
-
-// TaintEffect defines the effects of Taint
-type TaintEffect string
-
-// These are valid values for TaintEffect
-const (
- // Do not allow new pods to schedule onto the node unless they tolerate the taint,
- // but allow all pods submitted to Kubelet without going through the scheduler
- // to start, and allow all already-running pods to continue running.
- // Enforced by the scheduler.
- TaintEffectNoSchedule TaintEffect = "NoSchedule"
- // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
- // new pods onto the node, rather than prohibiting new pods from scheduling
- // onto the node entirely. Enforced by the scheduler.
- TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
- // Kubelet without going through the scheduler to start.
- // Enforced by Kubelet and the scheduler.
- // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
-
- // Evict any already-running pods that do not tolerate the taint.
- // Currently enforced by NodeController.
- TaintEffectNoExecute TaintEffect = "NoExecute"
-)
-
-// Toleration represents the toleration object that can be attached to a pod.
-// The pod this Toleration is attached to tolerates any taint that matches
-// the triple <key,value,effect> using the matching operator <operator>.
-type Toleration struct {
- // Key is the taint key that the toleration applies to. Empty means match all taint keys.
- // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
- // +optional
- Key string
- // Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
- // Exists is equivalent to wildcard for value, so that a pod can
- // tolerate all taints of a particular category.
- // +optional - Operator TolerationOperator - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 -} - -// TolerationOperator is the set of operators that can be used in a toleration. -type TolerationOperator string - -// These are valid values for TolerationOperator -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodReadinessGate contains the reference to a pod condition -type PodReadinessGate struct { - // ConditionType refers to a condition in the pod's condition list with matching type. - ConditionType PodConditionType -} - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume - // List of initialization containers belonging to the pod. - InitContainers []Container - // List of containers belonging to the pod. - Containers []Container - // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - // pod to perform user-initiated actions such as debugging. This list cannot be specified when - // creating a pod, and it cannot be modified by updating the pod spec. In order to add an - // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // +optional - EphemeralContainers []EphemeralContainer - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates stop immediately via the kill - // signal (no opportunity to shut down). - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. 
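
// Illustrative sketch of the matching rules described for Taint and
// Toleration above; recent k8s.io/api releases ship a similar helper as
// Toleration.ToleratesTaint, so treat this as an explanation, not the API.
func tolerates(tol corev1.Toleration, taint corev1.Taint) bool {
	if tol.Effect != "" && tol.Effect != taint.Effect {
		return false // a non-empty Effect must match the taint's effect
	}
	if tol.Key != "" && tol.Key != taint.Key {
		return false // an empty Key (with Exists) matches every taint key
	}
	switch tol.Operator {
	case corev1.TolerationOpExists:
		return true // Exists is a wildcard for the value
	case corev1.TolerationOpEqual, "":
		return tol.Value == taint.Value // Equal is the default operator
	}
	return false
}
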
- // +optional
- DNSPolicy DNSPolicy
- // NodeSelector is a selector which must be true for the pod to fit on a node
- // +optional
- NodeSelector map[string]string
-
- // ServiceAccountName is the name of the ServiceAccount to use to run this pod
- // The pod will be allowed to use secrets referenced by the ServiceAccount
- ServiceAccountName string
- // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
- // +optional
- AutomountServiceAccountToken *bool
-
- // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
- // the scheduler simply schedules this pod onto that node, assuming that it fits resource
- // requirements.
- // +optional
- NodeName string
- // SecurityContext holds pod-level security attributes and common container settings.
- // Optional: Defaults to empty. See type description for default values of each field.
- // +optional
- SecurityContext *PodSecurityContext
- // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
- // If specified, these secrets will be passed to individual puller implementations for them to use.
- // +optional
- ImagePullSecrets []LocalObjectReference
- // Specifies the hostname of the Pod.
- // If not specified, the pod's hostname will be set to a system-defined value.
- // +optional
- Hostname string
- // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
- // If not specified, the pod will not have a domainname at all.
- // +optional
- Subdomain string
- // If true, the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
- // In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
- // In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
- // If a pod does not have FQDN, this has no effect.
- // +optional
- SetHostnameAsFQDN *bool
- // If specified, the pod's scheduling constraints
- // +optional
- Affinity *Affinity
- // If specified, the pod will be dispatched by the specified scheduler.
- // If not specified, the pod will be dispatched by the default scheduler.
- // +optional
- SchedulerName string
- // If specified, the pod's tolerations.
- // +optional
- Tolerations []Toleration
- // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
- // file if specified. This is only valid for non-hostNetwork pods.
- // +optional
- HostAliases []HostAlias
- // If specified, indicates the pod's priority. "system-node-critical" and
- // "system-cluster-critical" are two special keywords which indicate the
- // highest priorities with the former being the highest priority. Any other
- // name must be defined by creating a PriorityClass object with that name.
- // If not specified, the pod priority will be default or zero if there is no
- // default.
- // +optional
- PriorityClassName string
- // The priority value. Various system components use this field to find the
- // priority of the pod. When Priority Admission Controller is enabled, it
- // prevents users from setting this field. The admission controller populates
- // this field from PriorityClassName.
- // The higher the value, the higher the priority.
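
// Illustrative sketch of the Hostname/Subdomain/SetHostnameAsFQDN fields
// (ptr is k8s.io/utils/ptr, also added by this dependency bump; the names
// and the cluster domain are hypothetical).
spec := corev1.PodSpec{
	Hostname:  "web-0",
	Subdomain: "web", // conventionally the name of a headless Service
	// With the fields above, a pod in namespace "default" on a cluster whose
	// domain is "cluster.local" gets the FQDN "web-0.web.default.svc.cluster.local";
	// SetHostnameAsFQDN makes that full form the kernel hostname as well.
	SetHostnameAsFQDN: ptr.To(true),
}
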
- // +optional - Priority *int32 - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // +optional - PreemptionPolicy *PreemptionPolicy - // Specifies the DNS parameters of a pod. - // Parameters specified here will be merged to the generated DNS - // configuration based on DNSPolicy. - // +optional - DNSConfig *PodDNSConfig - // If specified, all readiness gates will be evaluated for pod readiness. - // A pod is ready when all its containers are ready AND - // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates - // +optional - ReadinessGates []PodReadinessGate - // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class - // +optional - RuntimeClassName *string - // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - // This field will be autopopulated at admission time by the RuntimeClass admission controller. If - // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - // The RuntimeClass admission controller will reject Pod create requests which have the overhead already - // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead - // +optional - Overhead ResourceList - // EnableServiceLinks indicates whether information about services should be injected into pod's - // environment variables, matching the syntax of Docker links. - // If not specified, the default is true. - // +optional - EnableServiceLinks *bool - // TopologySpreadConstraints describes how a group of pods ought to spread across topology - // domains. Scheduler will schedule pods in a way which abides by the constraints. - // All topologySpreadConstraints are ANDed. - // +optional - TopologySpreadConstraints []TopologySpreadConstraint - // Specifies the OS of the containers in the pod. - // Some pod and container fields are restricted if this is set. 
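
// Illustrative sketch of PriorityClassName and PreemptionPolicy; "batch-low"
// is a hypothetical PriorityClass that would have to exist in the cluster.
never := corev1.PreemptNever
spec := corev1.PodSpec{
	PriorityClassName: "batch-low",
	PreemptionPolicy:  &never, // defaults to PreemptLowerPriority when unset
}
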
- // - // If the OS field is set to linux, the following fields must be unset: - // - securityContext.windowsOptions - // - // If the OS field is set to windows, following fields must be unset: - // - spec.hostPID - // - spec.hostIPC - // - spec.hostUsers - // - spec.securityContext.seLinuxOptions - // - spec.securityContext.seccompProfile - // - spec.securityContext.fsGroup - // - spec.securityContext.fsGroupChangePolicy - // - spec.securityContext.sysctls - // - spec.shareProcessNamespace - // - spec.securityContext.runAsUser - // - spec.securityContext.runAsGroup - // - spec.securityContext.supplementalGroups - // - spec.containers[*].securityContext.seLinuxOptions - // - spec.containers[*].securityContext.seccompProfile - // - spec.containers[*].securityContext.capabilities - // - spec.containers[*].securityContext.readOnlyRootFilesystem - // - spec.containers[*].securityContext.privileged - // - spec.containers[*].securityContext.allowPrivilegeEscalation - // - spec.containers[*].securityContext.procMount - // - spec.containers[*].securityContext.runAsUser - // - spec.containers[*].securityContext.runAsGroup - // +optional - OS *PodOS - - // SchedulingGates is an opaque list of values that if specified will block scheduling the pod. - // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the - // scheduler will not attempt to schedule the pod. - // - // SchedulingGates can only be set at pod creation time, and be removed only afterwards. - // - // This is a beta feature enabled by the PodSchedulingReadiness feature gate. - // - // +featureGate=PodSchedulingReadiness - // +optional - SchedulingGates []PodSchedulingGate - // ResourceClaims defines which ResourceClaims must be allocated - // and reserved before the Pod is allowed to start. The resources - // will be made available to those containers which consume them - // by name. - // - // This is an alpha field and requires enabling the - // DynamicResourceAllocation feature gate. - // - // This field is immutable. - // - // +featureGate=DynamicResourceAllocation - // +optional - ResourceClaims []PodResourceClaim -} - -// PodResourceClaim references exactly one ResourceClaim through a ClaimSource. -// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. -// Containers that need access to the ResourceClaim reference it with this name. -type PodResourceClaim struct { - // Name uniquely identifies this resource claim inside the pod. - // This must be a DNS_LABEL. - Name string - - // Source describes where to find the ResourceClaim. - Source ClaimSource -} - -// ClaimSource describes a reference to a ResourceClaim. -// -// Exactly one of these fields should be set. Consumers of this type must -// treat an empty object as if it has an unknown value. -type ClaimSource struct { - // ResourceClaimName is the name of a ResourceClaim object in the same - // namespace as this pod. - ResourceClaimName *string - - // ResourceClaimTemplateName is the name of a ResourceClaimTemplate - // object in the same namespace as this pod. - // - // The template will be used to create a new ResourceClaim, which will - // be bound to this pod. When this pod is deleted, the ResourceClaim - // will also be deleted. The pod name and resource name, along with a - // generated component, will be used to form a unique name for the - // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. 
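
// Illustrative sketch of SchedulingGates: a pod created with this spec stays
// unschedulable (SchedulingGated) until the gate entry is removed by whatever
// controller owns it; the gate name is hypothetical.
spec := corev1.PodSpec{
	SchedulingGates: []corev1.PodSchedulingGate{
		{Name: "example.com/quota-check"},
	},
}
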
- //
- // This field is immutable and no changes will be made to the
- // corresponding ResourceClaim by the control plane after creating the
- // ResourceClaim.
- ResourceClaimTemplateName *string
-}
-
-// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
-// which references a ResourceClaimTemplate. It stores the generated name for
-// the corresponding ResourceClaim.
-type PodResourceClaimStatus struct {
- // Name uniquely identifies this resource claim inside the pod.
- // This must match the name of an entry in pod.spec.resourceClaims,
- // which implies that the string must be a DNS_LABEL.
- Name string
-
- // ResourceClaimName is the name of the ResourceClaim that was
- // generated for the Pod in the namespace of the Pod. If this is
- // unset, then generating a ResourceClaim was not necessary. The
- // pod.spec.resourceClaims entry can be ignored in this case.
- ResourceClaimName *string
-}
-
-// OSName is the set of OSes that can be used in OS.
-type OSName string
-
-// These are valid values for OSName
-const (
- Linux OSName = "linux"
- Windows OSName = "windows"
-)
-
-// PodOS defines the OS parameters of a pod.
-type PodOS struct {
- // Name is the name of the operating system. The currently supported values are linux and windows.
- // Additional values may be defined in the future and can be one of:
- // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
- // Clients should expect to handle additional values and treat unrecognized values in this field as os: null
- Name OSName
-}
-
-// PodSchedulingGate is associated with a Pod to guard its scheduling.
-type PodSchedulingGate struct {
- // Name of the scheduling gate.
- // Each scheduling gate must have a unique name field.
- Name string
-}
-
-// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
-// pod's hosts file.
-type HostAlias struct {
- IP string
- Hostnames []string
-}
-
-// Sysctl defines a kernel parameter to be set
-type Sysctl struct {
- // Name of a property to set
- Name string
- // Value of a property to set
- Value string
-}
-
-// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
-// when a volume is mounted.
-type PodFSGroupChangePolicy string
-
-const (
- // FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
- // only when permission and ownership of root directory does not match with expected
- // permissions on the volume. This can help shorten the time it takes to change
- // ownership and permissions of a volume.
- FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
- // FSGroupChangeAlways indicates that volume's ownership and permissions
- // should always be changed whenever volume is mounted inside a Pod. This is the default
- // behavior.
- FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
-)
-
-// PodSecurityContext holds pod-level security attributes and common container settings.
-// Some fields are also present in container.securityContext. Field values of
-// container.securityContext take precedence over field values of PodSecurityContext.
-type PodSecurityContext struct {
- // Use the host's network namespace. If this option is set, the ports that will be
- // used must be specified.
- // Optional: Default to false
- // +k8s:conversion-gen=false
- // +optional
- HostNetwork bool
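
// Illustrative sketch of HostAlias and Sysctl in use (addresses and names are
// hypothetical); unsupported sysctls can keep the pod from launching.
spec := corev1.PodSpec{
	HostAliases: []corev1.HostAlias{
		{IP: "10.0.0.10", Hostnames: []string{"db.internal", "cache.internal"}},
	},
	SecurityContext: &corev1.PodSecurityContext{
		Sysctls: []corev1.Sysctl{{Name: "net.ipv4.tcp_syncookies", Value: "1"}},
	},
}
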
- // Use the host's pid namespace.
- // Optional: Default to false.
- // Note that this field cannot be set when spec.os.name is windows.
- // +k8s:conversion-gen=false
- // +optional
- HostPID bool
- // Use the host's ipc namespace.
- // Optional: Default to false.
- // Note that this field cannot be set when spec.os.name is windows.
- // +k8s:conversion-gen=false
- // +optional
- HostIPC bool
- // Share a single process namespace between all of the containers in a pod.
- // When this is set, containers will be able to view and signal processes from other containers
- // in the same pod, and the first process in each container will not be assigned PID 1.
- // HostPID and ShareProcessNamespace cannot both be set.
- // Note that this field cannot be set when spec.os.name is windows.
- // Optional: Default to false.
- // +k8s:conversion-gen=false
- // +optional
- ShareProcessNamespace *bool
- // Use the host's user namespace.
- // Optional: Default to true.
- // If set to true or not present, the pod will be run in the host user namespace, useful
- // for when the pod needs a feature only available to the host user namespace, such as
- // loading a kernel module with CAP_SYS_MODULE.
- // When set to false, a new user namespace is created for the pod. Setting false is useful
- // for mitigating container breakout vulnerabilities while still allowing users to run their
- // containers as root without actually having root privileges on the host.
- // Note that this field cannot be set when spec.os.name is windows.
- // +k8s:conversion-gen=false
- // +optional
- HostUsers *bool
- // The SELinux context to be applied to all containers.
- // If unspecified, the container runtime will allocate a random SELinux context for each
- // container. May also be set in SecurityContext. If set in
- // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
- // takes precedence for that container.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- SELinuxOptions *SELinuxOptions
- // The Windows specific settings applied to all containers.
- // If unspecified, the options within a container's SecurityContext will be used.
- // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
- // Note that this field cannot be set when spec.os.name is linux.
- // +optional
- WindowsOptions *WindowsSecurityContextOptions
- // The UID to run the entrypoint of the container process.
- // Defaults to user specified in image metadata if unspecified.
- // May also be set in SecurityContext. If set in both SecurityContext and
- // PodSecurityContext, the value specified in SecurityContext takes precedence
- // for that container.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- RunAsUser *int64
- // The GID to run the entrypoint of the container process.
- // Uses runtime default if unset.
- // May also be set in SecurityContext. If set in both SecurityContext and
- // PodSecurityContext, the value specified in SecurityContext takes precedence
- // for that container.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- RunAsGroup *int64
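
// Illustrative sketch of the precedence rule stated above: the pod-level
// value applies to every container except where a container sets its own.
spec := corev1.PodSpec{
	SecurityContext: &corev1.PodSecurityContext{
		RunAsUser: ptr.To[int64](1000), // pod-wide default
	},
	Containers: []corev1.Container{
		{Name: "app", Image: "example.com/app:1"}, // runs as UID 1000
		{
			Name:  "tool",
			Image: "example.com/tool:1",
			SecurityContext: &corev1.SecurityContext{
				RunAsUser: ptr.To[int64](2000), // container value takes precedence
			},
		},
	},
}
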
- // Indicates that the container must run as a non-root user.
- // If true, the Kubelet will validate the image at runtime to ensure that it
- // does not run as UID 0 (root) and fail to start the container if it does.
- // If unset or false, no such validation will be performed.
- // May also be set in SecurityContext. If set in both SecurityContext and
- // PodSecurityContext, the value specified in SecurityContext takes precedence
- // for that container.
- // +optional
- RunAsNonRoot *bool
- // A list of groups applied to the first process run in each container, in addition
- // to the container's primary GID, the fsGroup (if specified), and group memberships
- // defined in the container image for the uid of the container process. If unspecified,
- // no additional groups are added to any container. Note that group memberships
- // defined in the container image for the uid of the container process are still effective,
- // even if they are not included in this list.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- SupplementalGroups []int64
- // A special supplemental group that applies to all containers in a pod.
- // Some volume types allow the Kubelet to change the ownership of that volume
- // to be owned by the pod:
- //
- // 1. The owning GID will be the FSGroup
- // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
- // 3. The permission bits are OR'd with rw-rw----
- //
- // If unset, the Kubelet will not modify the ownership and permissions of any volume.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- FSGroup *int64
- // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
- // before being exposed inside Pod. This field will only apply to
- // volume types which support fsGroup based ownership (and permissions).
- // It will have no effect on ephemeral volume types such as: secret, configmaps
- // and emptydir.
- // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- FSGroupChangePolicy *PodFSGroupChangePolicy
- // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
- // sysctls (by the container runtime) might fail to launch.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- Sysctls []Sysctl
- // The seccomp options to use by the containers in this pod.
- // Note that this field cannot be set when spec.os.name is windows.
- // +optional
- SeccompProfile *SeccompProfile
-}
-
-// SeccompProfile defines a pod/container's seccomp profile settings.
-// Only one profile source may be set.
-// +union
-type SeccompProfile struct {
- // +unionDiscriminator
- Type SeccompProfileType
- // Load a profile defined in a static file on the node.
- // The profile must be preconfigured on the node to work.
- // LocalhostProfile cannot be an absolute nor a descending path.
- // +optional
- LocalhostProfile *string
-}
-
-// SeccompProfileType defines the supported seccomp profile types.
-type SeccompProfileType string
-
-const (
- // SeccompProfileTypeUnconfined is when no seccomp profile is applied (A.K.A. unconfined).
- SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
- // SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
- SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
- // SeccompProfileTypeLocalhost represents custom-made profiles stored on the node's disk.
- SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
-)
-
-// PodQOSClass defines the supported qos classes of Pods.
-type PodQOSClass string - -// These are valid values for PodQOSClass -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodDNSConfig defines the DNS parameters of a pod in addition to -// those generated from DNSPolicy. -type PodDNSConfig struct { - // A list of DNS name server IP addresses. - // This will be appended to the base nameservers generated from DNSPolicy. - // Duplicated nameservers will be removed. - // +optional - Nameservers []string - // A list of DNS search domains for host-name lookup. - // This will be appended to the base search paths generated from DNSPolicy. - // Duplicated search paths will be removed. - // +optional - Searches []string - // A list of DNS resolver options. - // This will be merged with the base options generated from DNSPolicy. - // Duplicated entries will be removed. Resolution options given in Options - // will override those that appear in the base DNSPolicy. - // +optional - Options []PodDNSConfigOption -} - -// PodDNSConfigOption defines DNS resolver options of a pod. -type PodDNSConfigOption struct { - // Required. - Name string - // +optional - Value *string -} - -// PodIP represents a single IP address allocated to the pod. -type PodIP struct { - // IP is the IP address assigned to the pod - IP string -} - -// HostIP represents a single IP address allocated to the host. -type HostIP struct { - // IP is the IP address assigned to the host - IP string -} - -// EphemeralContainerCommon is a copy of all fields in Container to be inlined in -// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer -// to Container and allows separate documentation for the fields of EphemeralContainer. -// When a new field is added to Container it must be added here as well. -type EphemeralContainerCommon struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The container image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - // of whether the variable exists or not. - // +optional - Command []string - // Optional: The container image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced - // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will - // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless - // of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to the container runtime's default working directory. - // +optional - WorkingDir string - // Ports are not allowed for ephemeral containers. 
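
// Illustrative sketch of PodDNSConfig with the "None" policy, where only the
// values below are used (the nameserver address is hypothetical; imports as
// in the earlier sketches).
spec := corev1.PodSpec{
	DNSPolicy: corev1.DNSNone,
	DNSConfig: &corev1.PodDNSConfig{
		Nameservers: []string{"10.96.0.10"},
		Searches:    []string{"svc.cluster.local"},
		Options: []corev1.PodDNSConfigOption{
			{Name: "ndots", Value: ptr.To("2")},
		},
	},
}
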
- // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - // already allocated to the pod. - // +optional - Resources ResourceRequirements - // Resources resize policy for the container. - // +featureGate=InPlacePodVerticalScaling - // +optional - ResizePolicy []ContainerResizePolicy - // Restart policy for the container to manage the restart behavior of each - // container within a pod. - // This may only be set for init containers. You cannot set this field on - // ephemeral containers. - // +featureGate=SidecarContainers - // +optional - RestartPolicy *ContainerRestartPolicy - // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. - // +optional - VolumeMounts []VolumeMount - // volumeDevices is the list of block devices to be used by the container. - // +optional - VolumeDevices []VolumeDevice - // Probes are not allowed for ephemeral containers. - // +optional - LivenessProbe *Probe - // Probes are not allowed for ephemeral containers. - // +optional - ReadinessProbe *Probe - // Probes are not allowed for ephemeral containers. - // +optional - StartupProbe *Probe - // Lifecycle is not allowed for ephemeral containers. - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the ephemeral container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// EphemeralContainerCommon converts to Container. All fields must be kept in sync between -// these two types. -var _ = Container(EphemeralContainerCommon{}) - -// An EphemeralContainer is a temporary container that you may add to an existing Pod for -// user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a Pod is -// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the -// Pod to exceed its resource allocation. -// -// To add an ephemeral container, use the ephemeralcontainers subresource of an existing -// Pod. Ephemeral containers may not be removed or restarted. -type EphemeralContainer struct { - // Ephemeral containers have all of the fields of Container, plus additional fields - // specific to ephemeral containers. 
Fields in common with Container are in the
- // following inlined struct so that an EphemeralContainer may easily be converted
- // to a Container.
- EphemeralContainerCommon
-
- // If set, the name of the container from PodSpec that this ephemeral container targets.
- // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
- // If not set then the ephemeral container uses the namespaces configured in the Pod spec.
- //
- // The container runtime must implement support for this feature. If the runtime does not
- // support namespace targeting then the result of setting this field is undefined.
- // +optional
- TargetContainerName string
-}
-
-// PodStatus represents information about the status of a pod. Status may trail the actual
-// state of a system.
-type PodStatus struct {
- // +optional
- Phase PodPhase
- // +optional
- Conditions []PodCondition
- // A human readable message indicating details about why the pod is in this state.
- // +optional
- Message string
- // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
- // +optional
- Reason string
- // nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
- // scheduled right away as preemption victims receive their graceful termination periods.
- // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
- // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
- // give the resources on this node to a higher priority pod that is created after preemption.
- // +optional
- NominatedNodeName string
-
- // HostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
- // A pod can be assigned to a node whose kubelet has a problem, in which case HostIP will
- // not be updated even though a node has been assigned to the pod.
- // +optional
- HostIP string
-
- // HostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
- // match the hostIP field. This list is empty if the pod has not started yet.
- // A pod can be assigned to a node whose kubelet has a problem, in which case HostIPs will
- // not be updated even though a node has been assigned to this pod.
- // +optional
- HostIPs []HostIP
-
- // PodIPs holds all of the known IP addresses allocated to the pod. Pods may be assigned AT MOST
- // one value for each of IPv4 and IPv6.
- // +optional
- PodIPs []PodIP
-
- // Date and time at which the object was acknowledged by the Kubelet.
- // This is before the Kubelet pulled the container image(s) for the pod.
- // +optional
- StartTime *metav1.Time
- // +optional
- QOSClass PodQOSClass
-
- // The list has one entry per init container in the manifest. The most recent successful
- // init container will have ready = true, the most recently started container will have
- // startTime set.
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
- InitContainerStatuses []ContainerStatus
- // The list has one entry per app container in the manifest.
- // +optional
- ContainerStatuses []ContainerStatus
-
- // Status for any ephemeral containers that have run in this pod.
- // +optional
- EphemeralContainerStatuses []ContainerStatus
-
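
// Illustrative sketch: building an ephemeral debug container. It must be
// added through the pod's ephemeralcontainers subresource, not a normal
// spec update; the image and target name are hypothetical. The inlined
// EphemeralContainerCommon is what keeps it convertible to a Container.
ec := corev1.EphemeralContainer{
	EphemeralContainerCommon: corev1.EphemeralContainerCommon{
		Name:    "debugger",
		Image:   "busybox:1.36",
		Command: []string{"sh"},
		Stdin:   true,
		TTY:     true,
	},
	TargetContainerName: "app", // share the namespaces of container "app"
}
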
- // Status of resources resize desired for pod's containers.
- // It is empty if no resources resize is pending.
- // Any changes to container resources will automatically set this to "Proposed"
- // +featureGate=InPlacePodVerticalScaling
- // +optional
- Resize PodResizeStatus
-
- // Status of resource claims.
- // +featureGate=DynamicResourceAllocation
- // +optional
- ResourceClaimStatuses []PodResourceClaimStatus
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
-type PodStatusResult struct {
- metav1.TypeMeta
- // +optional
- metav1.ObjectMeta
- // Status represents the current information about a pod. This data may not be up
- // to date.
- // +optional
- Status PodStatus
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
-type Pod struct {
- metav1.TypeMeta
- // +optional
- metav1.ObjectMeta
-
- // Spec defines the behavior of a pod.
- // +optional
- Spec PodSpec
-
- // Status represents the current information about a pod. This data may not be up
- // to date.
- // +optional
- Status PodStatus
-}
-
-// PodTemplateSpec describes the data a pod should have when created from a template
-type PodTemplateSpec struct {
- // Metadata of the pods created from this template.
- // +optional
- metav1.ObjectMeta
-
- // Spec defines the behavior of a pod.
- // +optional
- Spec PodSpec
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PodTemplate describes a template for creating copies of a predefined pod.
-type PodTemplate struct {
- metav1.TypeMeta
- // +optional
- metav1.ObjectMeta
-
- // Template defines the pods that will be created from this pod template
- // +optional
- Template PodTemplateSpec
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PodTemplateList is a list of PodTemplates.
-type PodTemplateList struct {
- metav1.TypeMeta
- // +optional
- metav1.ListMeta
-
- Items []PodTemplate
-}
-
-// ReplicationControllerSpec is the specification of a replication controller.
-// As the internal representation of a replication controller, it may have either
-// a TemplateRef or a Template set.
-type ReplicationControllerSpec struct {
- // Replicas is the number of desired replicas.
- Replicas int32
-
- // Minimum number of seconds for which a newly created pod should be ready
- // without any of its containers crashing, for it to be considered available.
- // Defaults to 0 (pod will be considered available as soon as it is ready)
- // +optional
- MinReadySeconds int32
-
- // Selector is a label query over pods that should match the Replicas count.
- Selector map[string]string
-
- // TemplateRef is a reference to an object that describes the pod that will be created if
- // insufficient replicas are detected. This reference is ignored if a Template is set.
- // Must be set before converting to a versioned API object
- // +optional
- // TemplateRef *ObjectReference
-
- // Template is the object that describes the pod that will be created if
- // insufficient replicas are detected. Internally, this takes precedence over a
- // TemplateRef.
- // The only allowed template.spec.restartPolicy value is "Always".
- // +optional
- Template *PodTemplateSpec
-}
-
-// ReplicationControllerStatus represents the current status of a replication
-// controller.
-type ReplicationControllerStatus struct {
- // Replicas is the number of actual replicas.
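
// Illustrative sketch of a minimal Pod object as a client would create it;
// Status is owned by the system and left empty (the image name is hypothetical).
pod := &corev1.Pod{
	TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
	ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	Spec: corev1.PodSpec{
		Containers: []corev1.Container{{Name: "app", Image: "example.com/app:1"}},
	},
}
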
- Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -// ReplicationControllerConditionType defines the conditions of a replication controller. -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// ServiceAffinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. 
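
// Illustrative sketch of a ReplicationController spec; with MinReadySeconds
// set, a ready pod only counts toward AvailableReplicas after 10 seconds
// (names and image are hypothetical; the versioned Replicas field is a pointer).
rc := &corev1.ReplicationController{
	ObjectMeta: metav1.ObjectMeta{Name: "web"},
	Spec: corev1.ReplicationControllerSpec{
		Replicas:        ptr.To[int32](3),
		MinReadySeconds: 10,
		Selector:        map[string]string{"app": "web"},
		Template: &corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
			Spec: corev1.PodSpec{
				RestartPolicy: corev1.RestartPolicyAlways, // the only allowed value here
				Containers:    []corev1.Container{{Name: "web", Image: "example.com/web:1"}},
			},
		},
	},
}
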
- ServiceAffinityNone ServiceAffinity = "None" -) - -const ( - // DefaultClientIPServiceAffinitySeconds is the default timeout seconds - // of Client IP based session affinity - 3 hours. - DefaultClientIPServiceAffinitySeconds int32 = 10800 - // MaxClientIPServiceAffinitySeconds is the max timeout seconds - // of Client IP based session affinity - 1 day. - MaxClientIPServiceAffinitySeconds int32 = 86400 -) - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - TimeoutSeconds *int32 -} - -// ServiceType string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// ServiceInternalTrafficPolicy describes the endpoint-selection policy for -// traffic sent to the ClusterIP. -type ServiceInternalTrafficPolicy string - -const ( - // ServiceInternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster" - - // ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same - // node as the traffic was received on (dropping the traffic if there are no - // local endpoints). - ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local" -) - -// ServiceExternalTrafficPolicy describes the endpoint-selection policy for -// traffic to external service entrypoints (NodePorts, ExternalIPs, and -// LoadBalancer IPs). -type ServiceExternalTrafficPolicy string - -const ( - // ServiceExternalTrafficPolicyCluster routes traffic to all endpoints. - ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster" - - // ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by - // routing only to endpoints on the same node as the traffic was received on - // (dropping the traffic if there are no local endpoints). - ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local" -) - -// These are the valid conditions of a service. -const ( - // LoadBalancerPortsError represents the condition of the requested ports - // on the cloud load balancer instance. 
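
// Illustrative sketch of ClientIP session affinity with a one-hour timeout
// (must be >0 and <=86400; 10800 is the default when unset).
svc := corev1.ServiceSpec{
	SessionAffinity: corev1.ServiceAffinityClientIP,
	SessionAffinityConfig: &corev1.SessionAffinityConfig{
		ClientIP: &corev1.ClientIPConfig{
			TimeoutSeconds: ptr.To[int32](3600),
		},
	},
}
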
- LoadBalancerPortsError = "LoadBalancerPortsError" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus - - // Current service condition - // +optional - Conditions []metav1.Condition -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string - - // Ports is a list of records of service ports - // If used, every port defined in the service should have an entry in it - // +optional - Ports []PortStatus -} - -// IPFamily represents the IP Family (IPv4 or IPv6). This type is used -// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies). -type IPFamily string - -const ( - // IPv4Protocol indicates that this IP is IPv4 protocol - IPv4Protocol IPFamily = "IPv4" - // IPv6Protocol indicates that this IP is IPv6 protocol - IPv6Protocol IPFamily = "IPv6" -) - -// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service -type IPFamilyPolicy string - -const ( - // IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily. - // The IPFamily assigned is based on the default IPFamily used by the cluster - // or as identified by service.spec.ipFamilies field - IPFamilyPolicySingleStack IPFamilyPolicy = "SingleStack" - // IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when - // the cluster is configured for dual-stack. If the cluster is not configured - // for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not - // set in service.spec.ipFamilies then the service will be assigned the default IPFamily - // configured on the cluster - IPFamilyPolicyPreferDualStack IPFamilyPolicy = "PreferDualStack" - // IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using - // IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The - // IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If - // service.spec.ipFamilies was not provided then it will be assigned according to how they are - // configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative - // IPFamily will be added by apiserver - IPFamilyPolicyRequireDualStack IPFamilyPolicy = "RequireDualStack" -) - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. 
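
// Illustrative sketch of a dual-stack request: PreferDualStack yields two
// families on dual-stack clusters and quietly degrades to one elsewhere;
// the first family listed becomes the primary one.
policy := corev1.IPFamilyPolicyPreferDualStack
svc := corev1.ServiceSpec{
	IPFamilyPolicy: &policy,
	IPFamilies:     []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
}
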
- // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ClusterIPs identifies all the ClusterIPs assigned to this - // service. ClusterIPs are assigned or reserved based on the values of - // service.spec.ipFamilies. A maximum of two entries (dual-stack IPs) are - // allowed in ClusterIPs. The IPFamily of each ClusterIP must match - // values provided in service.spec.ipFamilies. Clients using ClusterIPs must - // keep it in sync with ClusterIP (if provided) by having ClusterIP matching - // first element of ClusterIPs. - // +optional - ClusterIPs []string - - // IPFamilies identifies all the IPFamilies assigned for this Service. If a value - // was not provided for IPFamilies it will be defaulted based on the cluster - // configuration and the value of service.spec.ipFamilyPolicy. A maximum of two - // values (dual-stack IPFamilies) are allowed in IPFamilies. IPFamilies field is - // conditionally mutable: it allows for adding or removing a secondary IPFamily, - // but it does not allow changing the primary IPFamily of the service. - // +optional - IPFamilies []IPFamily - - // IPFamilyPolicy represents the dual-stack-ness requested or required by this - // Service. If there is no value provided, then this Service will be considered - // SingleStack (single IPFamily). Services can be SingleStack (single IPFamily), - // PreferDualStack (two dual-stack IPFamilies on dual-stack clusters or single - // IPFamily on single-stack clusters), or RequireDualStack (two dual-stack IPFamilies - // on dual-stack configured clusters, otherwise fail). 
The IPFamilies and ClusterIPs assigned - // to this service can be controlled by service.spec.ipFamilies and service.spec.clusterIPs - // respectively. - // +optional - IPFamilyPolicy *IPFamilyPolicy - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations. - // Using it is non-portable and it may not support dual-stack. - // Users are encouraged to use implementation-specific annotations when available. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. - // +optional - SessionAffinity ServiceAffinity - - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - SessionAffinityConfig *SessionAffinityConfig - - // Optional: If specified and supported by the platform, traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature. - // +optional - LoadBalancerSourceRanges []string - - // externalTrafficPolicy describes how nodes distribute service traffic they - // receive on one of the Service's "externally-facing" addresses (NodePorts, - // ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure - // the service in a way that assumes that external load balancers will take care - // of balancing the service traffic between nodes, and so each node will deliver - // traffic only to the node-local endpoints of the service, without masquerading - // the client source IP. (Traffic mistakenly sent to a node with no endpoints will - // be dropped.) The default value, "Cluster", uses the standard behavior of - // routing to all endpoints evenly (possibly modified by topology and other - // features). Note that traffic sent to an External IP or LoadBalancer IP from - // within the cluster will always get "Cluster" semantics, but clients sending to - // a NodePort from within the cluster may need to take traffic policy into account - // when picking a node. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicy - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service API - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only takes effect when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - HealthCheckNodePort int32 - - // publishNotReadyAddresses indicates that any agent which deals with endpoints for this - // Service should disregard any indications of ready/not-ready.
- // The primary use case for setting this field is for a StatefulSet's Headless Service to - // propagate SRV DNS records for its Pods for the purpose of peer discovery. - // The Kubernetes controllers that generate Endpoints and EndpointSlice resources for - // Services interpret this to mean that all endpoints are considered "ready" even if the - // Pods themselves are not. Agents which consume only Kubernetes generated endpoints - // through the Endpoints or EndpointSlice resources can safely assume this behavior. - // +optional - PublishNotReadyAddresses bool - - // allocateLoadBalancerNodePorts defines if NodePorts will be automatically - // allocated for services with type LoadBalancer. Default is "true". It - // may be set to "false" if the cluster load-balancer does not rely on - // NodePorts. If the caller requests specific NodePorts (by specifying a - // value), those requests will be respected, regardless of this field. - // This field may only be set for services with type LoadBalancer and will - // be cleared if the type is changed to any other type. - // +optional - AllocateLoadBalancerNodePorts *bool - - // loadBalancerClass is the class of the load balancer implementation this Service belongs to. - // If specified, the value of this field must be a label-style identifier, with an optional prefix, - // e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. - // This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load - // balancer implementation is used, today this is typically done through the cloud provider integration, - // but should apply for any default implementation. If set, it is assumed that a load balancer - // implementation is watching for Services with a matching class. Any default load balancer - // implementation (e.g. cloud providers) should ignore Services that set this field. - // This field can only be set when creating or updating a Service to type 'LoadBalancer'. - // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. - // +optional - LoadBalancerClass *string - - // InternalTrafficPolicy describes how nodes distribute service traffic they - // receive on the ClusterIP. If set to "Local", the proxy will assume that pods - // only want to talk to endpoints of the service on the same node as the pod, - // dropping the traffic if there are no local endpoints. The default value, - // "Cluster", uses the standard behavior of routing to all endpoints evenly - // (possibly modified by topology and other features). - // +optional - InternalTrafficPolicy *ServiceInternalTrafficPolicy -} - -// ServicePort represents the port on which the service is exposed -type ServicePort struct { - // Optional if only one ServicePort is defined on this service: The - // name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - Name string - - // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - Protocol Protocol - - // The application protocol for this port. - // This is used as a hint for implementations to offer richer behavior for protocols that they understand. - // This field follows standard Kubernetes label syntax. 
- // Valid values are either: - // - // * Un-prefixed protocol names - reserved for IANA standard service names (as per - // RFC-6335 and https://www.iana.org/assignments/service-names). - // - // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 - // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 - // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - // - // * Other protocols should use implementation-defined prefixed names such as - // mycompany.com/my-custom-protocol. - // +optional - AppProtocol *string - - // The port that will be exposed on the service. - Port int32 - - // Optional: The target port on pods selected by this service. If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. - // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true". - // This field should not be used to find auto-generated service account token secrets for use outside of pods. - // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. 
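// Illustrative sketch (not from the vendored source): the ServiceSpec and
// ServicePort types above are plain Go structs, so a minimal dual-stack-aware
// ClusterIP Service built against this internal API could look as follows.
// The names and ports are hypothetical; assumes imports of
// core "k8s.io/kubernetes/pkg/apis/core",
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", and
// "k8s.io/apimachinery/pkg/util/intstr". ServiceTypeClusterIP is defined
// elsewhere in this package.
func exampleService() core.Service {
	preferDual := core.IPFamilyPolicyPreferDualStack
	return core.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
		Spec: core.ServiceSpec{
			Type:           core.ServiceTypeClusterIP,
			Selector:       map[string]string{"app": "demo"},
			IPFamilyPolicy: &preferDual, // pointer field: nil is treated as SingleStack
			Ports: []core.ServicePort{{
				Name:       "http",
				Port:       80,
				TargetPort: intstr.FromInt(8080), // named or numeric pod port
			}},
		},
	}
}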
- // +optional - AutomountServiceAccountToken *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// -// The resulting set of endpoints can be viewed as: -// -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes a single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), - // or link-local multicast (224.0.0.0/24 or ff02::/16). - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. - TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol - - // The application protocol for this port. - // This is used as a hint for implementations to offer richer behavior for protocols that they understand. - // This field follows standard Kubernetes label syntax. - // Valid values are either: - // - // * Un-prefixed protocol names - reserved for IANA standard service names (as per - // RFC-6335 and https://www.iana.org/assignments/service-names). - // - // * Kubernetes-defined prefixed names: - // * 'kubernetes.io/h2c' - HTTP/2 over cleartext as described in https://www.rfc-editor.org/rfc/rfc7540 - // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 - // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 - // - // * Other protocols should use implementation-defined prefixed names such as - // mycompany.com/my-custom-protocol. - // +optional - AppProtocol *string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EndpointsList is a list of endpoints.
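// Illustrative sketch (not from the vendored source): the "Cartesian product
// of Addresses x Ports" described above can be made concrete. Assumes imports
// of core "k8s.io/kubernetes/pkg/apis/core" and "fmt".
func expandSubset(s core.EndpointSubset) []string {
	var endpoints []string
	for _, addr := range s.Addresses {
		for _, p := range s.Ports {
			endpoints = append(endpoints, fmt.Sprintf("%s: %s:%d", p.Name, addr.IP, p.Port))
		}
	}
	return endpoints
}
// For Addresses 10.10.1.1 and 10.10.2.2 with Ports a:8675 and b:309 this
// yields the four pairs shown in the comment above (ordering aside).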
-type EndpointsList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Endpoints -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. It may - // contain AT MOST one value for each of IPv4 and IPv6. - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - // +optional - PodCIDRs []string - - // ID of the node assigned by the cloud provider - // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" - // +optional - ProviderID string - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - // +optional - Unschedulable bool - - // If specified, the node's taints. - // +optional - Taints []Taint - - // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed. - // +optional - ConfigSource *NodeConfigSource - - // Deprecated. Not all kubelets will set this field. Remove field after 1.13. - // see: https://issues.k8s.io/61966 - // +optional - DoNotUseExternalID string -} - -// Deprecated: NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil. -type NodeConfigSource struct { - ConfigMap *ConfigMapNodeConfigSource -} - -// Deprecated: ConfigMapNodeConfigSource represents the config map of a node -type ConfigMapNodeConfigSource struct { - // Namespace is the metadata.namespace of the referenced ConfigMap. - // This field is required in all cases. - Namespace string - - // Name is the metadata.name of the referenced ConfigMap. - // This field is required in all cases. - Name string - - // UID is the metadata.UID of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - UID types.UID - - // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - ResourceVersion string - - // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure - // This field is required in all cases. - KubeletConfigKey string -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercase for backwards compatibility (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid - SystemUUID string - // Boot ID reported by the node. - BootID string - // Kernel Version reported by the node.
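// Illustrative sketch (not from the vendored source): NodeSpec.Unschedulable
// is the field behind "kubectl cordon": it blocks scheduling of new pods
// without evicting pods that are already running. Assumes
// core "k8s.io/kubernetes/pkg/apis/core" is imported.
func cordon(node *core.Node) {
	node.Spec.Unschedulable = true // existing pods are unaffected
}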
- KernelVersion string - // OS Image reported by the node. - OSImage string - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string - // Kubelet Version reported by the node. - KubeletVersion string - // KubeProxy Version reported by the node. - KubeProxyVersion string - // The Operating System reported by the node - OperatingSystem string - // The Architecture reported by the node - Architecture string -} - -// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. -type NodeConfigStatus struct { - // Assigned reports the checkpointed config the node will try to use. - // When Node.Spec.ConfigSource is updated, the node checkpoints the associated - // config payload to local disk, along with a record indicating intended - // config. The node refers to this record to choose its config checkpoint, and - // reports this record in Assigned. Assigned only updates in the status after - // the record has been checkpointed to disk. When the Kubelet is restarted, - // it tries to make the Assigned config the Active config by loading and - // validating the checkpointed payload identified by Assigned. - // +optional - Assigned *NodeConfigSource - // Active reports the checkpointed config the node is actively using. - // Active will represent either the current version of the Assigned config, - // or the current LastKnownGood config, depending on whether attempting to use the - // Assigned config results in an error. - // +optional - Active *NodeConfigSource - // LastKnownGood reports the checkpointed config the node will fall back to - // when it encounters an error attempting to use the Assigned config. - // The Assigned config becomes the LastKnownGood config when the node determines - // that the Assigned config is stable and correct. - // This is currently implemented as a 10-minute soak period starting when the local - // record of Assigned config is updated. If the Assigned config is Active at the end - // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is - // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, - // because the local default config is always assumed good. - // You should not make assumptions about the node's method of determining config stability - // and correctness, as this may change or become configurable in the future. - // +optional - LastKnownGood *NodeConfigSource - // Error describes any problems reconciling the Spec.ConfigSource to the Active config. - // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned - // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting - // to load or validate the Assigned config, etc. - // Errors may occur at different points while syncing config. Earlier errors (e.g. download or - // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across - // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in - // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error - // by fixing the config assigned in Spec.ConfigSource. - // You can find additional information for debugging by searching the error message in the Kubelet log. - // Error is a human-readable description of the error state; machines can check whether or not Error - // is empty, but should not rely on the stability of the Error text across Kubelet versions. 
- // +optional - Error string -} - -// NodeStatus is information about the current status of a node. -type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. - // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume - // Status of the config assigned to the node via the dynamic Kubelet config feature. - // +optional - Config *NodeConfigStatus -} - -// UniqueVolumeName defines the name of attached volume -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// PreferAvoidPodsEntry describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// PodSignature describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// ContainerImage describes a container image -type ContainerImage struct { - // Names by which this image is known. - // +optional - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -// NodePhase defines the phase a node is in -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster.
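// Illustrative sketch (not from the vendored source): Capacity and
// Allocatable above are ResourceLists (map[ResourceName]resource.Quantity,
// defined later in this file), so reading scheduling headroom is plain map
// access. Assumes core "k8s.io/kubernetes/pkg/apis/core" is imported;
// ResourceCPU also appears later in this file.
func allocatableCPU(status core.NodeStatus) string {
	if q, ok := status.Allocatable[core.ResourceCPU]; ok {
		return q.String() // e.g. "3500m" on a 4-core node with reserved capacity
	}
	return "unknown"
}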
- NodeTerminated NodePhase = "Terminated" -) - -// NodeConditionType defines node's condition -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. - NodeReady NodeConditionType = "Ready" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -// NodeCondition represents the node's condition -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// NodeAddressType defines the node's address type -type NodeAddressType string - -// These are valid values of node address type -const ( - // NodeHostName identifies a name of the node. Although every node can be assumed - // to have a NodeAddress of this type, its exact syntax and semantics are not - // defined, and are not consistent between different clusters. - NodeHostName NodeAddressType = "Hostname" - - // NodeInternalIP identifies an IP address which is assigned to one of the node's - // network interfaces. Every node should have at least one address of this type. - // - // An internal IP is normally expected to be reachable from every other node, but - // may not be visible to hosts outside the cluster. By default it is assumed that - // kube-apiserver can reach node internal IPs, though it is possible to configure - // clusters where this is not the case. - // - // NodeInternalIP is the default type of node IP, and does not necessarily imply - // that the IP is ONLY reachable internally. If a node has multiple internal IPs, - // no specific semantics are assigned to the additional IPs. - NodeInternalIP NodeAddressType = "InternalIP" - - // NodeExternalIP identifies an IP address which is, in some way, intended to be - // more usable from outside the cluster than an internal IP, though no specific - // semantics are defined. It may be a globally routable IP, though it is not - // required to be. - // - // External IPs may be assigned directly to an interface on the node, like a - // NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed - // to an internal node IP rather than being delivered directly (making the IP less - // efficient for node-to-node traffic than a NodeInternalIP). - NodeExternalIP NodeAddressType = "ExternalIP" - - // NodeInternalDNS identifies a DNS name which resolves to an IP address which has - // the characteristics of a NodeInternalIP. The IP it resolves to may or may not - // be a listed NodeInternalIP address. - NodeInternalDNS NodeAddressType = "InternalDNS" - - // NodeExternalDNS identifies a DNS name which resolves to an IP address which has - // the characteristics of a NodeExternalIP. The IP it resolves to may or may not - // be a listed NodeExternalIP address.
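// Illustrative sketch (not from the vendored source): a typical consumer of
// the condition and address types above checks whether a node is Ready and
// picks its first InternalIP. Assumes core "k8s.io/kubernetes/pkg/apis/core"
// is imported; ConditionTrue is defined elsewhere in this package.
func readyInternalIP(node core.Node) (ip string, ready bool) {
	for _, c := range node.Status.Conditions {
		if c.Type == core.NodeReady && c.Status == core.ConditionTrue {
			ready = true
			break
		}
	}
	for _, a := range node.Status.Addresses {
		if a.Type == core.NodeInternalIP {
			return a.Address, ready
		}
	}
	return "", ready
}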
- NodeExternalDNS NodeAddressType = "ExternalDNS" -) - -// NodeAddress represents node's address -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" -) - -const ( - // ResourceDefaultNamespacePrefix is the default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // ResourceHugePagesPrefix is the name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" - // ResourceAttachableVolumesPrefix is the name prefix for storage resource limits - ResourceAttachableVolumesPrefix = "attachable-volumes-" -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is a list of nodes. -type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace.
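// Illustrative sketch (not from the vendored source): the unit conventions
// noted above ("500m = .5 cores", "500Gi = 500 * 1024^3 bytes") are handled
// by resource.Quantity. Assumes imports of
// core "k8s.io/kubernetes/pkg/apis/core" and
// "k8s.io/apimachinery/pkg/api/resource".
func exampleResourceList() core.ResourceList {
	return core.ResourceList{
		core.ResourceCPU:    resource.MustParse("500m"),  // half a core
		core.ResourceMemory: resource.MustParse("500Gi"), // 500 GiB
	}
}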
- // +optional - Phase NamespacePhase - // +optional - Conditions []NamespaceCondition -} - -// NamespacePhase defines the phase in which the namespace is -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// NamespaceConditionType defines constants reporting on status during namespace lifetime and deletion progress -type NamespaceConditionType string - -// These are valid conditions of a namespace. -const ( - NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" - NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" - NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" -) - -// NamespaceCondition contains details about state of namespace. -type NamespaceCondition struct { - // Type of namespace controller condition. - Type NamespaceConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. - // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. 
- // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC 3339 timestamp with 9 digits of fractional seconds at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 - - // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the - // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver - // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real - // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the - // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept - // the actual log data coming from the real kubelet). - // +optional - InsecureSkipTLSVerifyBackend bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. - // +optional - Container string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. 
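// Illustrative sketch (not from the vendored source): the pointer fields on
// PodLogOptions distinguish "unset" from zero, which is why TailLines and
// SinceSeconds are *int64. A request for the last 100 lines of the previous
// container instance might be built like this; the container name is
// hypothetical. Assumes core "k8s.io/kubernetes/pkg/apis/core" is imported.
func lastHundredLines() core.PodLogOptions {
	tail := int64(100)
	return core.PodLogOptions{
		Container: "app", // which container's logs to return
		Previous:  true,  // logs of the prior, terminated instance
		TailLines: &tail, // nil would mean "from container creation"
	}
}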
- Command []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - // TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -// TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace. -type TypedLocalObjectReference struct { - // APIGroup is the group for the resource being referenced. - // If APIGroup is not specified, the specified Kind must be in the core API group. - // For any other third-party types, APIGroup is required. 
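// Illustrative sketch (not from the vendored source): FieldPath lets an
// ObjectReference point inside an object using the syntax described above.
// A reference to a named container of a pod (all names hypothetical; assumes
// core "k8s.io/kubernetes/pkg/apis/core" is imported):
func containerRef() core.ObjectReference {
	return core.ObjectReference{
		Kind:       "Pod",
		Namespace:  "default",
		Name:       "web-0",
		APIVersion: "v1",
		FieldPath:  "spec.containers{app}", // the container named "app"
	}
}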
- // +optional - APIGroup *string - // Kind is the type of resource being referenced - Kind string - // Name is the name of resource being referenced - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SerializedReference represents a serialized object reference -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -// EventSource represents the source from which an event is generated -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. Events -// have a limited retention time and triggers and messages may evolve -// with time. Event consumers should not rely on the timing of an event -// with a given Reason reflecting a consistent underlying trigger, or the -// continued existence of events with that Reason. Events should be -// treated as informative, best-effort, supplemental data. -// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - - metav1.ObjectMeta - - // The object that this event is about. Mapped to events.Event.regarding - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. Mapped to events.Event.note - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string - - // Time when this Event was first observed. - // +optional - EventTime metav1.MicroTime - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - Series *EventSeries - - // What action was taken/failed with regard to the Regarding object. - // +optional - Action string - - // Optional secondary object for more complex actions. - // +optional - Related *ObjectReference - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - ReportingController string - - // ID of the controller instance, e.g. `kubelet-xyzf`.
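// Illustrative sketch (not from the vendored source): given the caveats
// above, events are best-effort, supplemental data. Components normally emit
// them through an event recorder rather than by hand, but the shape is just
// this (reason and message are hypothetical; assumes
// core "k8s.io/kubernetes/pkg/apis/core" is imported):
func pulledImageEvent(pod core.ObjectReference) core.Event {
	return core.Event{
		InvolvedObject: pod,
		Reason:         "Pulled",                 // short, machine-understandable
		Message:        "Container image pulled", // human-readable
		Type:           core.EventTypeNormal,
		Count:          1,
	}
}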
- // +optional - ReportingInstance string -} - -// EventSeries represents a series of events -type EventSeries struct { - // Number of occurrences in this series up to the last heartbeat time - Count int32 - // Time of the last occurrence observed - LastObservedTime metav1.MicroTime -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metainternalversion.List - -// LimitType defines a type of object that is limited -type LimitType string - -const ( - // LimitTypePod defines limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // LimitTypeContainer defines limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // LimitTypePersistentVolumeClaim defines limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. - // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items.
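// Illustrative sketch (not from the vendored source): a LimitRangeItem of
// Type Container with Default and DefaultRequest is what injects limits and
// requests into containers that omit them. The quantities are hypothetical;
// assumes imports of core "k8s.io/kubernetes/pkg/apis/core" and
// "k8s.io/apimachinery/pkg/api/resource".
func containerDefaults() core.LimitRangeItem {
	return core.LimitRangeItem{
		Type: core.LimitTypeContainer,
		// Applied as the limit when a container specifies none.
		Default: core.ResourceList{core.ResourceCPU: resource.MustParse("500m")},
		// Applied as the request when a container specifies none.
		DefaultRequest: core.ResourceList{core.ResourceCPU: resource.MustParse("250m")},
		// Max, Min, and MaxLimitRequestRatio are omitted; every field is optional.
	}
}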
-type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// The following identify resource prefix for Kubernetes object types -const ( - // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. - ResourceRequestsHugePagesPrefix = "requests.hugepages-" - // Default resource requests prefix - DefaultResourceRequestsPrefix = "requests." 
-) - -// ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -// These are valid values for resource quota spec -const ( - // Match all pod objects where spec.activeDeadlineSeconds >=0 - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where spec.activeDeadlineSeconds is nil - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" - // Match all pod objects that have priority class mentioned - ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" - // Match all pod objects that have cross-namespace pod (anti)affinity mentioned - ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. - // +optional - Scopes []ResourceQuotaScope - // ScopeSelector is also a collection of filters like Scopes that must match each object tracked by a quota - // but expressed using ScopeSelectorOperator in combination with possible values. - // +optional - ScopeSelector *ScopeSelector -} - -// ScopeSelector represents the AND of the selectors represented -// by the scoped-resource selector terms. -type ScopeSelector struct { - // A list of scope selector requirements by scope of the resources. - // +optional - MatchExpressions []ScopedResourceSelectorRequirement -} - -// ScopedResourceSelectorRequirement is a selector that contains values, a scope name, and an operator -// that relates the scope name and values. -type ScopedResourceSelectorRequirement struct { - // The name of the scope that the selector applies to. - ScopeName ResourceQuotaScope - // Represents a scope's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. - Operator ScopeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// ScopeSelectorOperator is the set of operators that can be used in -// a scope selector requirement. 
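// Illustrative sketch (not from the vendored source): Scopes and
// ScopeSelector narrow which objects a quota counts. A quota capping the
// number of best-effort pods in a namespace could be expressed as follows
// (the limit is hypothetical; assumes imports of
// core "k8s.io/kubernetes/pkg/apis/core" and
// "k8s.io/apimachinery/pkg/api/resource"; ResourcePods is defined earlier in
// this file):
func bestEffortPodQuota() core.ResourceQuotaSpec {
	return core.ResourceQuotaSpec{
		Hard: core.ResourceList{
			core.ResourcePods: resource.MustParse("10"),
		},
		// Only pods with best-effort quality of service are tracked.
		Scopes: []core.ResourceQuotaScope{core.ResourceQuotaScopeBestEffort},
	}
}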
-type ScopeSelectorOperator string - -// These are the valid values for ScopeSelectorOperator -const ( - ScopeSelectorOpIn ScopeSelectorOperator = "In" - ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" - ScopeSelectorOpExists ScopeSelectorOperator = "Exists" - ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" -) - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Immutable field, if set, ensures that data stored in the Secret cannot - // be updated (only object metadata can be modified). - // +optional - Immutable *bool - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. - // +optional - Data map[string][]byte `datapolicy:"password,security-key,token"` - - // Used to facilitate programmatic handling of secret data. - // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types - // +optional - Type SecretType -} - -// MaxSecretSize represents the max secret size. 
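// Illustrative sketch (not from the vendored source): Secret.Data holds raw
// bytes (base64 only appears in the serialized form), and Immutable is a
// *bool so that "unset" stays distinct from false. The name and key are
// hypothetical; assumes imports of core "k8s.io/kubernetes/pkg/apis/core"
// and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
func opaqueSecret() core.Secret {
	immutable := true
	return core.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "db-creds"},
		Type:       core.SecretTypeOpaque,
		Immutable:  &immutable, // later updates to Data are rejected
		Data: map[string][]byte{
			"password": []byte("s3cr3t"), // stored as bytes, not base64
		},
	}
}
// The combined size of the values must stay under MaxSecretSize, defined
// just below as 1 MiB.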
-const MaxSecretSize = 1 * 1024 * 1024 - -// SecretType defines the types of secrets -type SecretType string - -// These are the valid values for SecretType -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJSON contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJSON SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJSONKey is the key of the required data for SecretTypeDockerConfigJSON secrets - DockerConfigJSONKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // At least one of the following fields is required: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authentication.
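// Illustrative sketch (not from the vendored source): the typed secrets
// above pair a SecretType with well-known Data keys, and using the exported
// key constants keeps the two in sync. A basic-auth secret, for which at
// least one of the two keys is required (assumes
// core "k8s.io/kubernetes/pkg/apis/core" is imported):
func basicAuthSecret(user, pass string) core.Secret {
	return core.Secret{
		Type: core.SecretTypeBasicAuth,
		Data: map[string][]byte{
			core.BasicAuthUsernameKey: []byte(user), // "username"
			core.BasicAuthPasswordKey: []byte(pass), // "password"
		},
	}
}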
- // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SecretList represents the list of secrets -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Immutable field, if set, ensures that data stored in the ConfigMap cannot - // be updated (only object metadata can be modified). - // +optional - Immutable *bool - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // Values with non-UTF-8 byte sequences must use the BinaryData field. - // The keys stored in Data must not overlap with the keys in - // the BinaryData field, this is enforced during validation process. - // +optional - Data map[string]string - - // BinaryData contains the binary data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // BinaryData can contain byte sequences that are not in the UTF-8 range. - // The keys stored in BinaryData must not overlap with the ones in - // the Data field, this is enforced during validation process. - // Using this field will require 1.10+ apiserver and - // kubelet. - // +optional - BinaryData map[string][]byte -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. - Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! 
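Stepping back to the Secret constants above: each SecretType documents the Data keys it requires. The sketch below shows the kubernetes.io/basic-auth contract using only identifiers that appear in the deleted file; it is illustrative only, and assumes the deleted package k8s.io/kubernetes/pkg/apis/core is still importable as "core".

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

// basicAuthSecret fills the SecretTypeBasicAuth contract defined above:
// at least one of the "username" or "password" keys must be present.
func basicAuthSecret(name, user, pass string) *core.Secret {
	return &core.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Type:       core.SecretTypeBasicAuth, // "kubernetes.io/basic-auth"
		Data: map[string][]byte{
			core.BasicAuthUsernameKey: []byte(user), // "username"
			core.BasicAuthPasswordKey: []byte(pass), // "password"
		},
	}
}

func main() {
	s := basicAuthSecret("registry-creds", "alice", "s3cr3t")
	fmt.Println(s.Name, s.Type)
}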
-const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// ComponentConditionType defines type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -// ComponentCondition represents the condition of a component -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -// Deprecated: This API is deprecated in v1.19+ -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatusList represents the list of component statuses -// Deprecated: This API is deprecated in v1.19+ -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // Note that this field cannot be set when spec.os.name is windows. 
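Continuing with the exec and stream constants just defined: they are plain query-parameter and header names shared by client and server. A hypothetical sketch of how they combine when dialing an exec endpoint follows; the endpoint path and values are illustrative, and only the constant names come from the deleted file.

package main

import (
	"fmt"
	"net/http"
	"net/url"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	q := url.Values{}
	q.Add(core.ExecCommandParam, "sh") // "command", repeated once per argv element
	q.Add(core.ExecCommandParam, "-c")
	q.Add(core.ExecCommandParam, "echo hi")
	q.Set(core.ExecStdoutParam, "1") // "output"
	q.Set(core.ExecTTYParam, "1")    // "tty"

	hdr := http.Header{}
	hdr.Set(core.StreamType, core.StreamTypeStdout) // streamType: stdout

	fmt.Println("/exec?"+q.Encode(), hdr.Get(core.StreamType))
}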
- // +optional - SELinuxOptions *SELinuxOptions - // The Windows specific settings applied to all containers. - // If unspecified, the options from the PodSecurityContext will be used. - // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - // Note that this field cannot be set when spec.os.name is linux. - // +optional - WindowsOptions *WindowsSecurityContextOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - RunAsUser *int64 - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - RunAsGroup *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - ReadOnlyRootFilesystem *bool - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - AllowPrivilegeEscalation *bool - // ProcMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for - // readonly paths and masked paths. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - ProcMount *ProcMountType - // The seccomp options to use by this container. If seccomp options are - // provided at both the pod & container level, the container options - // override the pod options. - // Note that this field cannot be set when spec.os.name is windows. - // +optional - SeccompProfile *SeccompProfile -} - -// ProcMountType defines the type of proc mount -type ProcMountType string - -const ( - // DefaultProcMount uses the container runtime defaults for readonly and masked - // paths for /proc. Most container runtimes mask certain paths in /proc to avoid - // accidental security exposure of special devices or information. - DefaultProcMount ProcMountType = "Default" - - // UnmaskedProcMount bypasses the default masking behavior of the container - // runtime and ensures the newly created /proc the container stays intact with - // no modifications. 
- UnmaskedProcMount ProcMountType = "Unmasked" -) - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// WindowsSecurityContextOptions contain Windows-specific options and credentials. -type WindowsSecurityContextOptions struct { - // GMSACredentialSpecName is the name of the GMSA credential spec to use. - // +optional - GMSACredentialSpecName *string - - // GMSACredentialSpec is where the GMSA admission webhook - // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - // GMSA credential spec named by the GMSACredentialSpecName field. - // +optional - GMSACredentialSpec *string - - // The UserName in Windows to run the entrypoint of the container process. - // Defaults to the user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUserName *string - - // HostProcess determines if a container should be run as a 'Host Process' container. - // All of a Pod's containers must have the same effective HostProcess value - // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - // In addition, if HostProcess is true then HostNetwork must also be set to true. - // +optional - HostProcess *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // DefaultHardPodAffinitySymmetricWeight is the weight of implicit PreferredDuringScheduling affinity rule. - // - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. 
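Before the scheduler constants continue below, a sketch of a restrictive SecurityContext built from the fields defined above. It is illustrative only; it assumes the deleted core package plus the generic ptr.To helper from k8s.io/utils/ptr.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/utils/ptr"
)

func main() {
	sc := &core.SecurityContext{
		// Drop all capabilities; per the comments above, none of these
		// fields may be set when spec.os.name is windows.
		Capabilities:             &core.Capabilities{Drop: []core.Capability{"ALL"}},
		RunAsNonRoot:             ptr.To(true),
		ReadOnlyRootFilesystem:   ptr.To(true),
		AllowPrivilegeEscalation: ptr.To(false),
		ProcMount:                ptr.To(core.DefaultProcMount), // runtime defaults for /proc
	}
	fmt.Printf("%+v\n", sc)
}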
- // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int32 = 1 -) - -// UnsatisfiableConstraintAction defines the actions that can be taken for an -// unsatisfiable constraint. -type UnsatisfiableConstraintAction string - -const ( - // DoNotSchedule instructs the scheduler not to schedule the pod - // when constraints are not satisfied. - DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" - // ScheduleAnyway instructs the scheduler to schedule the pod - // even if constraints are not satisfied. - ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" -) - -// NodeInclusionPolicy defines the type of node inclusion policy -// +enum -type NodeInclusionPolicy string - -const ( - // NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew. - NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore" - // NodeInclusionPolicyHonor means use this scheduling directive when calculating pod topology spread skew. - NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor" -) - -// TopologySpreadConstraint specifies how to spread matching pods among the given topology. -type TopologySpreadConstraint struct { - // MaxSkew describes the degree to which pods may be unevenly distributed. - // When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference - // between the number of matching pods in the target topology and the global minimum. - // The global minimum is the minimum number of matching pods in an eligible domain - // or zero if the number of eligible domains is less than MinDomains. - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 2/2/1: - // In this case, the global minimum is 1. - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P | P P | P | - // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; - // scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) - // violate MaxSkew(1). - // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - // When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence - // to topologies that satisfy it. - // It's a required field. Default value is 1 and 0 is not allowed. - MaxSkew int32 - // TopologyKey is the key of node labels. Nodes that have a label with this key - // and identical values are considered to be in the same topology. - // We consider each as a "bucket", and try to put balanced number - // of pods into each bucket. - // We define a domain as a particular instance of a topology. - // Also, we define an eligible domain as a domain whose nodes meet the requirements of - // nodeAffinityPolicy and nodeTaintsPolicy. - // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. - // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. - // It's a required field. - TopologyKey string - // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - // the spread constraint. - // - DoNotSchedule (default) tells the scheduler not to schedule it. 
- // - ScheduleAnyway tells the scheduler to schedule the pod in any location, - // but giving higher precedence to topologies that would help reduce the - // skew. - // A constraint is considered "Unsatisfiable" for an incoming pod - // if and only if every possible node assignment for that pod would violate - // "MaxSkew" on some topology. - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 3/1/1: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P P | P | P | - // +-------+-------+-------+ - // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - // won't make it *more* imbalanced. - // It's a required field. - WhenUnsatisfiable UnsatisfiableConstraintAction - // LabelSelector is used to find matching pods. - // Pods that match this label selector are counted to determine the number of pods - // in their corresponding topology domain. - // +optional - LabelSelector *metav1.LabelSelector - // MinDomains indicates a minimum number of eligible domains. - // When the number of eligible domains with matching topology keys is less than minDomains, - // Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. - // And when the number of eligible domains with matching topology keys equals or greater than minDomains, - // this value has no effect on scheduling. - // As a result, when the number of eligible domains is less than minDomains, - // scheduler won't schedule more than maxSkew Pods to those domains. - // If value is nil, the constraint behaves as if MinDomains is equal to 1. - // Valid values are integers greater than 0. - // When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - // - // For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same - // labelSelector spread as 2/2/2: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P | P P | P P | - // +-------+-------+-------+ - // The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. - // In this situation, new pod with the same labelSelector cannot be scheduled, - // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, - // it will violate MaxSkew. - // - // This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). - // +optional - MinDomains *int32 - // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector - // when calculating pod topology spread skew. Options are: - // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - // - // If this value is nil, the behavior is equivalent to the Honor policy. - // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. - // +optional - NodeAffinityPolicy *NodeInclusionPolicy - // NodeTaintsPolicy indicates how we will treat node taints when calculating - // pod topology spread skew. 
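Pulling together the topology-spread fields defined so far (the NodeTaintsPolicy options and the MatchLabelKeys field continue directly below), here is a hedged sketch matching the 3-zone worked example above: at most a skew of 1 across zones, hard-enforced. It assumes the deleted core package import path.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	honor := core.NodeInclusionPolicyHonor
	tsc := core.TopologySpreadConstraint{
		MaxSkew:           1, // max difference between any zone and the global minimum
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: core.DoNotSchedule, // with pods spread 2/2/1, only zone3 is schedulable
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		NodeAffinityPolicy: &honor, // count only nodes matching nodeAffinity/nodeSelector
	}
	fmt.Printf("%+v\n", tsc)
}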
- // Options are:
- // - Honor: nodes without taints, along with tainted nodes for which the incoming pod
- // has a toleration, are included.
- // - Ignore: node taints are ignored. All nodes are included.
- //
- // If this value is nil, the behavior is equivalent to the Ignore policy.
- // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
- // +optional
- NodeTaintsPolicy *NodeInclusionPolicy
- // MatchLabelKeys is a set of pod label keys to select the pods over which
- // spreading will be calculated. The keys are used to lookup values from the
- // incoming pod labels, those key-value labels are ANDed with labelSelector
- // to select the group of existing pods over which spreading will be calculated
- // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
- // MatchLabelKeys cannot be set when LabelSelector isn't set.
- // Keys that don't exist in the incoming pod labels will
- // be ignored. A null or empty list means only match against labelSelector.
- //
- // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
- // +listType=atomic
- // +optional
- MatchLabelKeys []string
-}
-
-// These are the built-in errors for PortStatus.
-const (
- // MixedProtocolNotSupported error in PortStatus means that the cloud provider
- // can't ensure the port on the load balancer because mixed values of protocols
- // on the same LoadBalancer type of Service are not supported by the cloud provider.
- MixedProtocolNotSupported = "MixedProtocolNotSupported"
-)
-
-// PortStatus represents the error condition of a service port
-type PortStatus struct {
- // Port is the port number of the service port of which status is recorded here
- Port int32
- // Protocol is the protocol of the service port of which status is recorded here
- Protocol Protocol
- // Error is to record the problem with the service port
- // The format of the error shall comply with the following rules:
- // - built-in error values shall be specified in this file and those shall use
- // CamelCase names
- // - cloud provider specific error values must have names that comply with the
- // format foo.example.com/CamelCase.
- // ---
- // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
- // +optional
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
- // +kubebuilder:validation:MaxLength=316
- Error *string
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
deleted file mode 100644
index 471fdbd6f..000000000
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,6256 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package core - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. -func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - *out = new(AzureDataDiskCachingMode) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(AzureDataDiskKind) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { - *out = *in - if in.SecretNamespace != nil { - in, out := &in.SecretNamespace, &out.SecretNamespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. -func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { - if in == nil { - return nil - } - out := new(AzureFilePersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. -func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { - if in == nil { - return nil - } - out := new(AzureFileVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Target = in.Target - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { - *out = *in - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ControllerPublishSecretRef != nil { - in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodeStageSecretRef != nil { - in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(SecretReference) - **out = **in - } - if in.ControllerExpandSecretRef != nil { - in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodeExpandSecretRef != nil { - in, out := &in.NodeExpandSecretRef, &out.NodeExpandSecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. -func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CSIPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { - *out = *in - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. -func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { - if in == nil { - return nil - } - out := new(CSIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Capabilities) DeepCopyInto(out *Capabilities) { - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. -func (in *Capabilities) DeepCopy() *Capabilities { - if in == nil { - return nil - } - out := new(Capabilities) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. -func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CephFSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. -func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { - if in == nil { - return nil - } - out := new(CephFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. -func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CinderPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. -func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { - if in == nil { - return nil - } - out := new(CinderVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClaimSource) DeepCopyInto(out *ClaimSource) { - *out = *in - if in.ResourceClaimName != nil { - in, out := &in.ResourceClaimName, &out.ResourceClaimName - *out = new(string) - **out = **in - } - if in.ResourceClaimTemplateName != nil { - in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimSource. 
-func (in *ClaimSource) DeepCopy() *ClaimSource { - if in == nil { - return nil - } - out := new(ClaimSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. -func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { - if in == nil { - return nil - } - out := new(ClientIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. -func (in *ComponentCondition) DeepCopy() *ComponentCondition { - if in == nil { - return nil - } - out := new(ComponentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. -func (in *ComponentStatus) DeepCopy() *ComponentStatus { - if in == nil { - return nil - } - out := new(ComponentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. -func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { - if in == nil { - return nil - } - out := new(ComponentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Immutable != nil { - in, out := &in.Immutable, &out.Immutable - *out = new(bool) - **out = **in - } - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.BinaryData != nil { - in, out := &in.BinaryData, &out.BinaryData - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - var outVal []byte - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]byte, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. -func (in *ConfigMap) DeepCopy() *ConfigMap { - if in == nil { - return nil - } - out := new(ConfigMap) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMap) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. -func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { - if in == nil { - return nil - } - out := new(ConfigMapEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. -func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { - if in == nil { - return nil - } - out := new(ConfigMapKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. -func (in *ConfigMapList) DeepCopy() *ConfigMapList { - if in == nil { - return nil - } - out := new(ConfigMapList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ConfigMapList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. -func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { - if in == nil { - return nil - } - out := new(ConfigMapNodeConfigSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. -func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { - if in == nil { - return nil - } - out := new(ConfigMapProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. -func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { - if in == nil { - return nil - } - out := new(ConfigMapVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
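The generated helpers follow one pattern per kind: DeepCopyInto writes into an existing value, DeepCopy allocates and returns a fresh one, and DeepCopyObject adapts the result to runtime.Object. A small sketch of what that buys a caller, assuming the deleted package import path:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	orig := &core.ConfigMap{Data: map[string]string{"key": "v1"}}

	cp := orig.DeepCopy()
	cp.Data["key"] = "v2" // the copy's map is a fresh allocation, so orig is untouched

	var obj runtime.Object = orig.DeepCopyObject() // usable wherever a runtime.Object is expected
	fmt.Println(orig.Data["key"], cp.Data["key"], obj != nil) // v1 v2 true
}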
-func (in *Container) DeepCopyInto(out *Container) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.ResizePolicy != nil { - in, out := &in.ResizePolicy, &out.ResizePolicy - *out = make([]ContainerResizePolicy, len(*in)) - copy(*out, *in) - } - if in.RestartPolicy != nil { - in, out := &in.RestartPolicy, &out.RestartPolicy - *out = new(ContainerRestartPolicy) - **out = **in - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeDevices != nil { - in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]VolumeDevice, len(*in)) - copy(*out, *in) - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.StartupProbe != nil { - in, out := &in.StartupProbe, &out.StartupProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. -func (in *Container) DeepCopy() *Container { - if in == nil { - return nil - } - out := new(Container) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. -func (in *ContainerImage) DeepCopy() *ContainerImage { - if in == nil { - return nil - } - out := new(ContainerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. -func (in *ContainerPort) DeepCopy() *ContainerPort { - if in == nil { - return nil - } - out := new(ContainerPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy. -func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy { - if in == nil { - return nil - } - out := new(ContainerResizePolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerState) DeepCopyInto(out *ContainerState) { - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - **out = **in - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(ContainerStateRunning) - (*in).DeepCopyInto(*out) - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. -func (in *ContainerState) DeepCopy() *ContainerState { - if in == nil { - return nil - } - out := new(ContainerState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. -func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { - if in == nil { - return nil - } - out := new(ContainerStateRunning) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. -func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { - if in == nil { - return nil - } - out := new(ContainerStateTerminated) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. -func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { - if in == nil { - return nil - } - out := new(ContainerStateWaiting) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { - *out = *in - in.State.DeepCopyInto(&out.State) - in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) - if in.Started != nil { - in, out := &in.Started, &out.Started - *out = new(bool) - **out = **in - } - if in.AllocatedResources != nil { - in, out := &in.AllocatedResources, &out.AllocatedResources - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(ResourceRequirements) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. -func (in *ContainerStatus) DeepCopy() *ContainerStatus { - if in == nil { - return nil - } - out := new(ContainerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. -func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { - if in == nil { - return nil - } - out := new(DaemonEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. -func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { - if in == nil { - return nil - } - out := new(DownwardAPIProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - **out = **in - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. -func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. -func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { - *out = *in - if in.SizeLimit != nil { - in, out := &in.SizeLimit, &out.SizeLimit - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. -func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { - if in == nil { - return nil - } - out := new(EmptyDirVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - *out = new(string) - **out = **in - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. -func (in *EndpointAddress) DeepCopy() *EndpointAddress { - if in == nil { - return nil - } - out := new(EndpointAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { - *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. -func (in *EndpointPort) DeepCopy() *EndpointPort { - if in == nil { - return nil - } - out := new(EndpointPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. 
-func (in *EndpointSubset) DeepCopy() *EndpointSubset {
-	if in == nil {
-		return nil
-	}
-	out := new(EndpointSubset)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Endpoints) DeepCopyInto(out *Endpoints) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.Subsets != nil {
-		in, out := &in.Subsets, &out.Subsets
-		*out = make([]EndpointSubset, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
-func (in *Endpoints) DeepCopy() *Endpoints {
-	if in == nil {
-		return nil
-	}
-	out := new(Endpoints)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Endpoints) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Endpoints, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
-func (in *EndpointsList) DeepCopy() *EndpointsList {
-	if in == nil {
-		return nil
-	}
-	out := new(EndpointsList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EndpointsList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
-	*out = *in
-	if in.ConfigMapRef != nil {
-		in, out := &in.ConfigMapRef, &out.ConfigMapRef
-		*out = new(ConfigMapEnvSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(SecretEnvSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource.
-func (in *EnvFromSource) DeepCopy() *EnvFromSource {
-	if in == nil {
-		return nil
-	}
-	out := new(EnvFromSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EnvVar) DeepCopyInto(out *EnvVar) {
-	*out = *in
-	if in.ValueFrom != nil {
-		in, out := &in.ValueFrom, &out.ValueFrom
-		*out = new(EnvVarSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
-func (in *EnvVar) DeepCopy() *EnvVar {
-	if in == nil {
-		return nil
-	}
-	out := new(EnvVar)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
-	*out = *in
-	if in.FieldRef != nil {
-		in, out := &in.FieldRef, &out.FieldRef
-		*out = new(ObjectFieldSelector)
-		**out = **in
-	}
-	if in.ResourceFieldRef != nil {
-		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
-		*out = new(ResourceFieldSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ConfigMapKeyRef != nil {
-		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
-		*out = new(ConfigMapKeySelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.SecretKeyRef != nil {
-		in, out := &in.SecretKeyRef, &out.SecretKeyRef
-		*out = new(SecretKeySelector)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource.
-func (in *EnvVarSource) DeepCopy() *EnvVarSource {
-	if in == nil {
-		return nil
-	}
-	out := new(EnvVarSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) {
-	*out = *in
-	in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer.
-func (in *EphemeralContainer) DeepCopy() *EphemeralContainer {
-	if in == nil {
-		return nil
-	}
-	out := new(EphemeralContainer)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) {
-	*out = *in
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Ports != nil {
-		in, out := &in.Ports, &out.Ports
-		*out = make([]ContainerPort, len(*in))
-		copy(*out, *in)
-	}
-	if in.EnvFrom != nil {
-		in, out := &in.EnvFrom, &out.EnvFrom
-		*out = make([]EnvFromSource, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]EnvVar, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	in.Resources.DeepCopyInto(&out.Resources)
-	if in.ResizePolicy != nil {
-		in, out := &in.ResizePolicy, &out.ResizePolicy
-		*out = make([]ContainerResizePolicy, len(*in))
-		copy(*out, *in)
-	}
-	if in.RestartPolicy != nil {
-		in, out := &in.RestartPolicy, &out.RestartPolicy
-		*out = new(ContainerRestartPolicy)
-		**out = **in
-	}
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]VolumeMount, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.VolumeDevices != nil {
-		in, out := &in.VolumeDevices, &out.VolumeDevices
-		*out = make([]VolumeDevice, len(*in))
-		copy(*out, *in)
-	}
-	if in.LivenessProbe != nil {
-		in, out := &in.LivenessProbe, &out.LivenessProbe
-		*out = new(Probe)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ReadinessProbe != nil {
-		in, out := &in.ReadinessProbe, &out.ReadinessProbe
-		*out = new(Probe)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.StartupProbe != nil {
-		in, out := &in.StartupProbe, &out.StartupProbe
-		*out = new(Probe)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Lifecycle != nil {
-		in, out := &in.Lifecycle, &out.Lifecycle
-		*out = new(Lifecycle)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(SecurityContext)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon.
-func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon {
-	if in == nil {
-		return nil
-	}
-	out := new(EphemeralContainerCommon)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
-	*out = *in
-	if in.VolumeClaimTemplate != nil {
-		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
-		*out = new(PersistentVolumeClaimTemplate)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
-func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(EphemeralVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Event) DeepCopyInto(out *Event) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.InvolvedObject = in.InvolvedObject
-	out.Source = in.Source
-	in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp)
-	in.LastTimestamp.DeepCopyInto(&out.LastTimestamp)
-	in.EventTime.DeepCopyInto(&out.EventTime)
-	if in.Series != nil {
-		in, out := &in.Series, &out.Series
-		*out = new(EventSeries)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Related != nil {
-		in, out := &in.Related, &out.Related
-		*out = new(ObjectReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
-func (in *Event) DeepCopy() *Event {
-	if in == nil {
-		return nil
-	}
-	out := new(Event)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Event) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EventList) DeepCopyInto(out *EventList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Event, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
-func (in *EventList) DeepCopy() *EventList {
-	if in == nil {
-		return nil
-	}
-	out := new(EventList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EventList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EventSeries) DeepCopyInto(out *EventSeries) {
-	*out = *in
-	in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
-func (in *EventSeries) DeepCopy() *EventSeries {
-	if in == nil {
-		return nil
-	}
-	out := new(EventSeries)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EventSource) DeepCopyInto(out *EventSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
-func (in *EventSource) DeepCopy() *EventSource {
-	if in == nil {
-		return nil
-	}
-	out := new(EventSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExecAction) DeepCopyInto(out *ExecAction) {
-	*out = *in
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction.
-func (in *ExecAction) DeepCopy() *ExecAction {
-	if in == nil {
-		return nil
-	}
-	out := new(ExecAction)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
-	*out = *in
-	if in.TargetWWNs != nil {
-		in, out := &in.TargetWWNs, &out.TargetWWNs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Lun != nil {
-		in, out := &in.Lun, &out.Lun
-		*out = new(int32)
-		**out = **in
-	}
-	if in.WWIDs != nil {
-		in, out := &in.WWIDs, &out.WWIDs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource.
-func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(FCVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(SecretReference)
-		**out = **in
-	}
-	if in.Options != nil {
-		in, out := &in.Options, &out.Options
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
-func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(FlexPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(LocalObjectReference)
-		**out = **in
-	}
-	if in.Options != nil {
-		in, out := &in.Options, &out.Options
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource.
-func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(FlexVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource.
-func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(FlockerVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource.
-func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(GCEPersistentDiskVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GRPCAction) DeepCopyInto(out *GRPCAction) {
-	*out = *in
-	if in.Service != nil {
-		in, out := &in.Service, &out.Service
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction.
-func (in *GRPCAction) DeepCopy() *GRPCAction {
-	if in == nil {
-		return nil
-	}
-	out := new(GRPCAction)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource.
-func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(GitRepoVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
-	*out = *in
-	if in.EndpointsNamespace != nil {
-		in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
-func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(GlusterfsPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource.
-func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(GlusterfsVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
-	*out = *in
-	out.Port = in.Port
-	if in.HTTPHeaders != nil {
-		in, out := &in.HTTPHeaders, &out.HTTPHeaders
-		*out = make([]HTTPHeader, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
-func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
-	if in == nil {
-		return nil
-	}
-	out := new(HTTPGetAction)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
-func (in *HTTPHeader) DeepCopy() *HTTPHeader {
-	if in == nil {
-		return nil
-	}
-	out := new(HTTPHeader)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HostAlias) DeepCopyInto(out *HostAlias) {
-	*out = *in
-	if in.Hostnames != nil {
-		in, out := &in.Hostnames, &out.Hostnames
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias.
-func (in *HostAlias) DeepCopy() *HostAlias {
-	if in == nil {
-		return nil
-	}
-	out := new(HostAlias)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HostIP) DeepCopyInto(out *HostIP) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP.
-func (in *HostIP) DeepCopy() *HostIP {
-	if in == nil {
-		return nil
-	}
-	out := new(HostIP)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
-	*out = *in
-	if in.Type != nil {
-		in, out := &in.Type, &out.Type
-		*out = new(HostPathType)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource.
-func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(HostPathVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) {
-	*out = *in
-	if in.Portals != nil {
-		in, out := &in.Portals, &out.Portals
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(SecretReference)
-		**out = **in
-	}
-	if in.InitiatorName != nil {
-		in, out := &in.InitiatorName, &out.InitiatorName
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource.
-func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ISCSIPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
-	*out = *in
-	if in.Portals != nil {
-		in, out := &in.Portals, &out.Portals
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(LocalObjectReference)
-		**out = **in
-	}
-	if in.InitiatorName != nil {
-		in, out := &in.InitiatorName, &out.InitiatorName
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource.
-func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ISCSIVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
-	*out = *in
-	if in.Mode != nil {
-		in, out := &in.Mode, &out.Mode
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath.
-func (in *KeyToPath) DeepCopy() *KeyToPath {
-	if in == nil {
-		return nil
-	}
-	out := new(KeyToPath)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
-	*out = *in
-	if in.PostStart != nil {
-		in, out := &in.PostStart, &out.PostStart
-		*out = new(LifecycleHandler)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.PreStop != nil {
-		in, out := &in.PreStop, &out.PreStop
-		*out = new(LifecycleHandler)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle.
-func (in *Lifecycle) DeepCopy() *Lifecycle {
-	if in == nil {
-		return nil
-	}
-	out := new(Lifecycle)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecAction)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.HTTPGet != nil {
-		in, out := &in.HTTPGet, &out.HTTPGet
-		*out = new(HTTPGetAction)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.TCPSocket != nil {
-		in, out := &in.TCPSocket, &out.TCPSocket
-		*out = new(TCPSocketAction)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler.
-func (in *LifecycleHandler) DeepCopy() *LifecycleHandler {
-	if in == nil {
-		return nil
-	}
-	out := new(LifecycleHandler)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LimitRange) DeepCopyInto(out *LimitRange) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange.
-func (in *LimitRange) DeepCopy() *LimitRange {
-	if in == nil {
-		return nil
-	}
-	out := new(LimitRange)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *LimitRange) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) {
-	*out = *in
-	if in.Max != nil {
-		in, out := &in.Max, &out.Max
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Min != nil {
-		in, out := &in.Min, &out.Min
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Default != nil {
-		in, out := &in.Default, &out.Default
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.DefaultRequest != nil {
-		in, out := &in.DefaultRequest, &out.DefaultRequest
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.MaxLimitRequestRatio != nil {
-		in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem.
-func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
-	if in == nil {
-		return nil
-	}
-	out := new(LimitRangeItem)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]LimitRange, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList.
-func (in *LimitRangeList) DeepCopy() *LimitRangeList {
-	if in == nil {
-		return nil
-	}
-	out := new(LimitRangeList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *LimitRangeList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) {
-	*out = *in
-	if in.Limits != nil {
-		in, out := &in.Limits, &out.Limits
-		*out = make([]LimitRangeItem, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec.
-func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(LimitRangeSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *List) DeepCopyInto(out *List) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]runtime.Object, len(*in))
-		for i := range *in {
-			if (*in)[i] != nil {
-				(*out)[i] = (*in)[i].DeepCopyObject()
-			}
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
-func (in *List) DeepCopy() *List {
-	if in == nil {
-		return nil
-	}
-	out := new(List)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *List) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
-	*out = *in
-	if in.Ports != nil {
-		in, out := &in.Ports, &out.Ports
-		*out = make([]PortStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
-func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
-	if in == nil {
-		return nil
-	}
-	out := new(LoadBalancerIngress)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
-	*out = *in
-	if in.Ingress != nil {
-		in, out := &in.Ingress, &out.Ingress
-		*out = make([]LoadBalancerIngress, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
-func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(LoadBalancerStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
-func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
-	if in == nil {
-		return nil
-	}
-	out := new(LocalObjectReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
-	*out = *in
-	if in.FSType != nil {
-		in, out := &in.FSType, &out.FSType
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
-func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(LocalVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
-func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(NFSVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Namespace) DeepCopyInto(out *Namespace) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
-func (in *Namespace) DeepCopy() *Namespace {
-	if in == nil {
-		return nil
-	}
-	out := new(Namespace)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Namespace) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) {
-	*out = *in
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition.
-func (in *NamespaceCondition) DeepCopy() *NamespaceCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(NamespaceCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Namespace, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
-func (in *NamespaceList) DeepCopy() *NamespaceList {
-	if in == nil {
-		return nil
-	}
-	out := new(NamespaceList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NamespaceList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
-	*out = *in
-	if in.Finalizers != nil {
-		in, out := &in.Finalizers, &out.Finalizers
-		*out = make([]FinalizerName, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
-func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(NamespaceSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]NamespaceCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
-func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NamespaceStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Node) DeepCopyInto(out *Node) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
-func (in *Node) DeepCopy() *Node {
-	if in == nil {
-		return nil
-	}
-	out := new(Node)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Node) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
-func (in *NodeAddress) DeepCopy() *NodeAddress {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeAddress)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
-	*out = *in
-	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
-		*out = new(NodeSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
-		*out = make([]PreferredSchedulingTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
-func (in *NodeAffinity) DeepCopy() *NodeAffinity {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeAffinity)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
-	*out = *in
-	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
-func (in *NodeCondition) DeepCopy() *NodeCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
-	*out = *in
-	if in.ConfigMap != nil {
-		in, out := &in.ConfigMap, &out.ConfigMap
-		*out = new(ConfigMapNodeConfigSource)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
-func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeConfigSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
-	*out = *in
-	if in.Assigned != nil {
-		in, out := &in.Assigned, &out.Assigned
-		*out = new(NodeConfigSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Active != nil {
-		in, out := &in.Active, &out.Active
-		*out = new(NodeConfigSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.LastKnownGood != nil {
-		in, out := &in.LastKnownGood, &out.LastKnownGood
-		*out = new(NodeConfigSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
-func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeConfigStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
-	*out = *in
-	out.KubeletEndpoint = in.KubeletEndpoint
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
-func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeDaemonEndpoints)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeList) DeepCopyInto(out *NodeList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Node, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
-func (in *NodeList) DeepCopy() *NodeList {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
-func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeProxyOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResources) DeepCopyInto(out *NodeResources) {
-	*out = *in
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
-func (in *NodeResources) DeepCopy() *NodeResources {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResources)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
-	*out = *in
-	if in.NodeSelectorTerms != nil {
-		in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
-		*out = make([]NodeSelectorTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
-func (in *NodeSelector) DeepCopy() *NodeSelector {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
-	*out = *in
-	if in.Values != nil {
-		in, out := &in.Values, &out.Values
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
-func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSelectorRequirement)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
-	*out = *in
-	if in.MatchExpressions != nil {
-		in, out := &in.MatchExpressions, &out.MatchExpressions
-		*out = make([]NodeSelectorRequirement, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.MatchFields != nil {
-		in, out := &in.MatchFields, &out.MatchFields
-		*out = make([]NodeSelectorRequirement, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
-func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSelectorTerm)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
-	*out = *in
-	if in.PodCIDRs != nil {
-		in, out := &in.PodCIDRs, &out.PodCIDRs
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Taints != nil {
-		in, out := &in.Taints, &out.Taints
-		*out = make([]Taint, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.ConfigSource != nil {
-		in, out := &in.ConfigSource, &out.ConfigSource
-		*out = new(NodeConfigSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
-func (in *NodeSpec) DeepCopy() *NodeSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
-	*out = *in
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Allocatable != nil {
-		in, out := &in.Allocatable, &out.Allocatable
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]NodeCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Addresses != nil {
-		in, out := &in.Addresses, &out.Addresses
-		*out = make([]NodeAddress, len(*in))
-		copy(*out, *in)
-	}
-	out.DaemonEndpoints = in.DaemonEndpoints
-	out.NodeInfo = in.NodeInfo
-	if in.Images != nil {
-		in, out := &in.Images, &out.Images
-		*out = make([]ContainerImage, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.VolumesInUse != nil {
-		in, out := &in.VolumesInUse, &out.VolumesInUse
-		*out = make([]UniqueVolumeName, len(*in))
-		copy(*out, *in)
-	}
-	if in.VolumesAttached != nil {
-		in, out := &in.VolumesAttached, &out.VolumesAttached
-		*out = make([]AttachedVolume, len(*in))
-		copy(*out, *in)
-	}
-	if in.Config != nil {
-		in, out := &in.Config, &out.Config
-		*out = new(NodeConfigStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
-func (in *NodeStatus) DeepCopy() *NodeStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
-func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeSystemInfo)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
-func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
-	if in == nil {
-		return nil
-	}
-	out := new(ObjectFieldSelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
-func (in *ObjectReference) DeepCopy() *ObjectReference {
-	if in == nil {
-		return nil
-	}
-	out := new(ObjectReference)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ObjectReference) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
-func (in *PersistentVolume) DeepCopy() *PersistentVolume {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolume)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PersistentVolume) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
-func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaim)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
-	*out = *in
-	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
-func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PersistentVolumeClaim, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
-func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
-	*out = *in
-	if in.AccessModes != nil {
-		in, out := &in.AccessModes, &out.AccessModes
-		*out = make([]PersistentVolumeAccessMode, len(*in))
-		copy(*out, *in)
-	}
-	if in.Selector != nil {
-		in, out := &in.Selector, &out.Selector
-		*out = new(v1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	in.Resources.DeepCopyInto(&out.Resources)
-	if in.StorageClassName != nil {
-		in, out := &in.StorageClassName, &out.StorageClassName
-		*out = new(string)
-		**out = **in
-	}
-	if in.VolumeMode != nil {
-		in, out := &in.VolumeMode, &out.VolumeMode
-		*out = new(PersistentVolumeMode)
-		**out = **in
-	}
-	if in.DataSource != nil {
-		in, out := &in.DataSource, &out.DataSource
-		*out = new(TypedLocalObjectReference)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.DataSourceRef != nil {
-		in, out := &in.DataSourceRef, &out.DataSourceRef
-		*out = new(TypedObjectReference)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
-func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
-	*out = *in
-	if in.AccessModes != nil {
-		in, out := &in.AccessModes, &out.AccessModes
-		*out = make([]PersistentVolumeAccessMode, len(*in))
-		copy(*out, *in)
-	}
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]PersistentVolumeClaimCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.AllocatedResources != nil {
-		in, out := &in.AllocatedResources, &out.AllocatedResources
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.AllocatedResourceStatuses != nil {
-		in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses
-		*out = make(map[ResourceName]ClaimResourceStatus, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
-func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
-	*out = *in
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
-func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimTemplate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
-func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeClaimVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PersistentVolume, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
-func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
-	*out = *in
-	if in.GCEPersistentDisk != nil {
-		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
-		*out = new(GCEPersistentDiskVolumeSource)
-		**out = **in
-	}
-	if in.AWSElasticBlockStore != nil {
-		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
-		*out = new(AWSElasticBlockStoreVolumeSource)
-		**out = **in
-	}
-	if in.HostPath != nil {
-		in, out := &in.HostPath, &out.HostPath
-		*out = new(HostPathVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Glusterfs != nil {
-		in, out := &in.Glusterfs, &out.Glusterfs
-		*out = new(GlusterfsPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.NFS != nil {
-		in, out := &in.NFS, &out.NFS
-		*out = new(NFSVolumeSource)
-		**out = **in
-	}
-	if in.RBD != nil {
-		in, out := &in.RBD, &out.RBD
-		*out = new(RBDPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Quobyte != nil {
-		in, out := &in.Quobyte, &out.Quobyte
-		*out = new(QuobyteVolumeSource)
-		**out = **in
-	}
-	if in.ISCSI != nil {
-		in, out := &in.ISCSI, &out.ISCSI
-		*out = new(ISCSIPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.FlexVolume != nil {
-		in, out := &in.FlexVolume, &out.FlexVolume
-		*out = new(FlexPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Cinder != nil {
-		in, out := &in.Cinder, &out.Cinder
-		*out = new(CinderPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.CephFS != nil {
-		in, out := &in.CephFS, &out.CephFS
-		*out = new(CephFSPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.FC != nil {
-		in, out := &in.FC, &out.FC
-		*out = new(FCVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Flocker != nil {
-		in, out := &in.Flocker, &out.Flocker
-		*out = new(FlockerVolumeSource)
-		**out = **in
-	}
-	if in.AzureFile != nil {
-		in, out := &in.AzureFile, &out.AzureFile
-		*out = new(AzureFilePersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VsphereVolume != nil {
-		in, out := &in.VsphereVolume, &out.VsphereVolume
-		*out = new(VsphereVirtualDiskVolumeSource)
-		**out = **in
-	}
-	if in.AzureDisk != nil {
-		in, out := &in.AzureDisk, &out.AzureDisk
-		*out = new(AzureDiskVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.PhotonPersistentDisk != nil {
-		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
-		*out = new(PhotonPersistentDiskVolumeSource)
-		**out = **in
-	}
-	if in.PortworxVolume != nil {
-		in, out := &in.PortworxVolume, &out.PortworxVolume
-		*out = new(PortworxVolumeSource)
-		**out = **in
-	}
-	if in.ScaleIO != nil {
-		in, out := &in.ScaleIO, &out.ScaleIO
-		*out = new(ScaleIOPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Local != nil {
-		in, out := &in.Local, &out.Local
-		*out = new(LocalVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.StorageOS != nil {
-		in, out := &in.StorageOS, &out.StorageOS
-		*out = new(StorageOSPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.CSI != nil {
-		in, out := &in.CSI, &out.CSI
-		*out = new(CSIPersistentVolumeSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
-func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
-	*out = *in
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
-	if in.AccessModes != nil {
-		in, out := &in.AccessModes, &out.AccessModes
-		*out = make([]PersistentVolumeAccessMode, len(*in))
-		copy(*out, *in)
-	}
-	if in.ClaimRef != nil {
-		in, out := &in.ClaimRef, &out.ClaimRef
-		*out = new(ObjectReference)
-		**out = **in
-	}
-	if in.MountOptions != nil {
-		in, out := &in.MountOptions, &out.MountOptions
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.VolumeMode != nil {
-		in, out := &in.VolumeMode, &out.VolumeMode
-		*out = new(PersistentVolumeMode)
-		**out = **in
-	}
-	if in.NodeAffinity != nil {
-		in, out := &in.NodeAffinity, &out.NodeAffinity
-		*out = new(VolumeNodeAffinity)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
-func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
-	*out = *in
-	if in.LastPhaseTransitionTime != nil {
-		in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime
-		*out = (*in).DeepCopy()
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
-func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PersistentVolumeStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
-func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PhotonPersistentDiskVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Pod) DeepCopyInto(out *Pod) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
-func (in *Pod) DeepCopy() *Pod {
-	if in == nil {
-		return nil
-	}
-	out := new(Pod)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Pod) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
-	*out = *in
-	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
-		*out = make([]PodAffinityTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
-		*out = make([]WeightedPodAffinityTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
-func (in *PodAffinity) DeepCopy() *PodAffinity {
-	if in == nil {
-		return nil
-	}
-	out := new(PodAffinity)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
-	*out = *in
-	if in.LabelSelector != nil {
-		in, out := &in.LabelSelector, &out.LabelSelector
-		*out = new(v1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.NamespaceSelector != nil {
-		in, out := &in.NamespaceSelector, &out.NamespaceSelector
-		*out = new(v1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
-func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
-	if in == nil {
-		return nil
-	}
-	out := new(PodAffinityTerm)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
-	*out = *in
-	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
-		*out = make([]PodAffinityTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
-		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
-		*out = make([]WeightedPodAffinityTerm, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
-func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
-	if in == nil {
-		return nil
-	}
-	out := new(PodAntiAffinity)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
-func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(PodAttachOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodCondition) DeepCopyInto(out *PodCondition) {
-	*out = *in
-	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
-func (in *PodCondition) DeepCopy() *PodCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(PodCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
-	*out = *in
-	if in.Nameservers != nil {
-		in, out := &in.Nameservers, &out.Nameservers
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Searches != nil {
-		in, out := &in.Searches, &out.Searches
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Options != nil {
-		in, out := &in.Options, &out.Options
-		*out = make([]PodDNSConfigOption, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
-func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
-	if in == nil {
-		return nil
-	}
-	out := new(PodDNSConfig)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
-	*out = *in
-	if in.Value != nil {
-		in, out := &in.Value, &out.Value
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
-func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
-	if in == nil {
-		return nil
-	}
-	out := new(PodDNSConfigOption)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
-func (in *PodExecOptions) DeepCopy() *PodExecOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(PodExecOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodExecOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodIP) DeepCopyInto(out *PodIP) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP.
-func (in *PodIP) DeepCopy() *PodIP {
-	if in == nil {
-		return nil
-	}
-	out := new(PodIP)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodList) DeepCopyInto(out *PodList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Pod, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
-func (in *PodList) DeepCopy() *PodList {
-	if in == nil {
-		return nil
-	}
-	out := new(PodList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.SinceSeconds != nil {
-		in, out := &in.SinceSeconds, &out.SinceSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.SinceTime != nil {
-		in, out := &in.SinceTime, &out.SinceTime
-		*out = (*in).DeepCopy()
-	}
-	if in.TailLines != nil {
-		in, out := &in.TailLines, &out.TailLines
-		*out = new(int64)
-		**out = **in
-	}
-	if in.LimitBytes != nil {
-		in, out := &in.LimitBytes, &out.LimitBytes
-		*out = new(int64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
-func (in *PodLogOptions) DeepCopy() *PodLogOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(PodLogOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodLogOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodOS) DeepCopyInto(out *PodOS) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS.
-func (in *PodOS) DeepCopy() *PodOS {
-	if in == nil {
-		return nil
-	}
-	out := new(PodOS)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Ports != nil {
-		in, out := &in.Ports, &out.Ports
-		*out = make([]int32, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
-func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(PodPortForwardOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
-func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(PodProxyOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
-func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
-	if in == nil {
-		return nil
-	}
-	out := new(PodReadinessGate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
-	*out = *in
-	in.Source.DeepCopyInto(&out.Source)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
-func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
-	if in == nil {
-		return nil
-	}
-	out := new(PodResourceClaim)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) {
-	*out = *in
-	if in.ResourceClaimName != nil {
-		in, out := &in.ResourceClaimName, &out.ResourceClaimName
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus.
-func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PodResourceClaimStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate.
-func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSchedulingGate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
-	*out = *in
-	if in.ShareProcessNamespace != nil {
-		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
-		*out = new(bool)
-		**out = **in
-	}
-	if in.HostUsers != nil {
-		in, out := &in.HostUsers, &out.HostUsers
-		*out = new(bool)
-		**out = **in
-	}
-	if in.SELinuxOptions != nil {
-		in, out := &in.SELinuxOptions, &out.SELinuxOptions
-		*out = new(SELinuxOptions)
-		**out = **in
-	}
-	if in.WindowsOptions != nil {
-		in, out := &in.WindowsOptions, &out.WindowsOptions
-		*out = new(WindowsSecurityContextOptions)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.RunAsUser != nil {
-		in, out := &in.RunAsUser, &out.RunAsUser
-		*out = new(int64)
-		**out = **in
-	}
-	if in.RunAsGroup != nil {
-		in, out := &in.RunAsGroup, &out.RunAsGroup
-		*out = new(int64)
-		**out = **in
-	}
-	if in.RunAsNonRoot != nil {
-		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
-		*out = new(bool)
-		**out = **in
-	}
-	if in.SupplementalGroups != nil {
-		in, out := &in.SupplementalGroups, &out.SupplementalGroups
-		*out = make([]int64, len(*in))
-		copy(*out, *in)
-	}
-	if in.FSGroup != nil {
-		in, out := &in.FSGroup, &out.FSGroup
-		*out = new(int64)
-		**out = **in
-	}
-	if in.FSGroupChangePolicy != nil {
-		in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
-		*out = new(PodFSGroupChangePolicy)
-		**out = **in
-	}
-	if in.Sysctls != nil {
-		in, out := &in.Sysctls, &out.Sysctls
-		*out = make([]Sysctl, len(*in))
-		copy(*out, *in)
-	}
-	if in.SeccompProfile != nil {
-		in, out := &in.SeccompProfile, &out.SeccompProfile
-		*out = new(SeccompProfile)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
-func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSecurityContext)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSignature) DeepCopyInto(out *PodSignature) {
-	*out = *in
-	if in.PodController != nil {
-		in, out := &in.PodController, &out.PodController
-		*out = new(v1.OwnerReference)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
-func (in *PodSignature) DeepCopy() *PodSignature {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSignature)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodSpec) DeepCopyInto(out *PodSpec) {
-	*out = *in
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]Volume, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]Container, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.EphemeralContainers != nil {
-		in, out := &in.EphemeralContainers, &out.EphemeralContainers
-		*out = make([]EphemeralContainer, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.TerminationGracePeriodSeconds != nil {
-		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.ActiveDeadlineSeconds != nil {
-		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.AutomountServiceAccountToken != nil {
-		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
-		*out = new(bool)
-		**out = **in
-	}
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(PodSecurityContext)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ImagePullSecrets != nil {
-		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
-		*out = make([]LocalObjectReference, len(*in))
-		copy(*out, *in)
-	}
-	if in.SetHostnameAsFQDN != nil {
-		in, out := &in.SetHostnameAsFQDN, &out.SetHostnameAsFQDN
-		*out = new(bool)
-		**out = **in
-	}
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.HostAliases != nil {
-		in, out := &in.HostAliases, &out.HostAliases
-		*out = make([]HostAlias, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Priority != nil {
-		in, out := &in.Priority, &out.Priority
-		*out = new(int32)
-		**out = **in
-	}
-	if in.PreemptionPolicy != nil {
-		in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
-		*out = new(PreemptionPolicy)
-		**out = **in
-	}
-	if in.DNSConfig != nil {
-		in, out := &in.DNSConfig, &out.DNSConfig
-		*out = new(PodDNSConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ReadinessGates != nil {
-		in, out := &in.ReadinessGates, &out.ReadinessGates
-		*out = make([]PodReadinessGate, len(*in))
-		copy(*out, *in)
-	}
-	if in.RuntimeClassName != nil {
-		in, out := &in.RuntimeClassName, &out.RuntimeClassName
-		*out = new(string)
-		**out = **in
-	}
-	if in.Overhead != nil {
-		in, out := &in.Overhead, &out.Overhead
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.EnableServiceLinks != nil {
-		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
-		*out = new(bool)
-		**out = **in
-	}
-	if in.TopologySpreadConstraints != nil {
-		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
-		*out = make([]TopologySpreadConstraint, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.OS != nil {
-		in, out := &in.OS, &out.OS
-		*out = new(PodOS)
-		**out = **in
-	}
-	if in.SchedulingGates != nil {
-		in, out := &in.SchedulingGates, &out.SchedulingGates
-		*out = make([]PodSchedulingGate, len(*in))
-		copy(*out, *in)
-	}
-	if in.ResourceClaims != nil {
-		in, out := &in.ResourceClaims, &out.ResourceClaims
-		*out = make([]PodResourceClaim, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
-func (in *PodSpec) DeepCopy() *PodSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PodSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodStatus) DeepCopyInto(out *PodStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]PodCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.HostIPs != nil {
-		in, out := &in.HostIPs, &out.HostIPs
-		*out = make([]HostIP, len(*in))
-		copy(*out, *in)
-	}
-	if in.PodIPs != nil {
-		in, out := &in.PodIPs, &out.PodIPs
-		*out = make([]PodIP, len(*in))
-		copy(*out, *in)
-	}
-	if in.StartTime != nil {
-		in, out := &in.StartTime, &out.StartTime
-		*out = (*in).DeepCopy()
-	}
-	if in.InitContainerStatuses != nil {
-		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
-		*out = make([]ContainerStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.ContainerStatuses != nil {
-		in, out := &in.ContainerStatuses, &out.ContainerStatuses
-		*out = make([]ContainerStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.EphemeralContainerStatuses != nil {
-		in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses
-		*out = make([]ContainerStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.ResourceClaimStatuses != nil {
-		in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses
-		*out = make([]PodResourceClaimStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
-func (in *PodStatus) DeepCopy() *PodStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PodStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
-func (in *PodStatusResult) DeepCopy() *PodStatusResult {
-	if in == nil {
-		return nil
-	}
-	out := new(PodStatusResult)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodStatusResult) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Template.DeepCopyInto(&out.Template)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
-func (in *PodTemplate) DeepCopy() *PodTemplate {
-	if in == nil {
-		return nil
-	}
-	out := new(PodTemplate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodTemplate) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]PodTemplate, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
-func (in *PodTemplateList) DeepCopy() *PodTemplateList {
-	if in == nil {
-		return nil
-	}
-	out := new(PodTemplateList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodTemplateList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
-	*out = *in
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
-func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(PodTemplateSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PortStatus) DeepCopyInto(out *PortStatus) {
-	*out = *in
-	if in.Error != nil {
-		in, out := &in.Error, &out.Error
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
-func (in *PortStatus) DeepCopy() *PortStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PortStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
-func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PortworxVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Preconditions) DeepCopyInto(out *Preconditions) {
-	*out = *in
-	if in.UID != nil {
-		in, out := &in.UID, &out.UID
-		*out = new(types.UID)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
-func (in *Preconditions) DeepCopy() *Preconditions {
-	if in == nil {
-		return nil
-	}
-	out := new(Preconditions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
-	*out = *in
-	in.PodSignature.DeepCopyInto(&out.PodSignature)
-	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
-func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
-	if in == nil {
-		return nil
-	}
-	out := new(PreferAvoidPodsEntry)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
-	*out = *in
-	in.Preference.DeepCopyInto(&out.Preference)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
-func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
-	if in == nil {
-		return nil
-	}
-	out := new(PreferredSchedulingTerm)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Probe) DeepCopyInto(out *Probe) {
-	*out = *in
-	in.ProbeHandler.DeepCopyInto(&out.ProbeHandler)
-	if in.TerminationGracePeriodSeconds != nil {
-		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
-func (in *Probe) DeepCopy() *Probe {
-	if in == nil {
-		return nil
-	}
-	out := new(Probe)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecAction)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.HTTPGet != nil {
-		in, out := &in.HTTPGet, &out.HTTPGet
-		*out = new(HTTPGetAction)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.TCPSocket != nil {
-		in, out := &in.TCPSocket, &out.TCPSocket
-		*out = new(TCPSocketAction)
-		**out = **in
-	}
-	if in.GRPC != nil {
-		in, out := &in.GRPC, &out.GRPC
-		*out = new(GRPCAction)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler.
-func (in *ProbeHandler) DeepCopy() *ProbeHandler {
-	if in == nil {
-		return nil
-	}
-	out := new(ProbeHandler)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
-	*out = *in
-	if in.Sources != nil {
-		in, out := &in.Sources, &out.Sources
-		*out = make([]VolumeProjection, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.DefaultMode != nil {
-		in, out := &in.DefaultMode, &out.DefaultMode
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
-func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ProjectedVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
-func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(QuobyteVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
-	*out = *in
-	if in.CephMonitors != nil {
-		in, out := &in.CephMonitors, &out.CephMonitors
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(SecretReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
-func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(RBDPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
-	*out = *in
-	if in.CephMonitors != nil {
-		in, out := &in.CephMonitors, &out.CephMonitors
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(LocalObjectReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
-func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(RBDVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.Data != nil {
-		in, out := &in.Data, &out.Data
-		*out = make([]byte, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
-func (in *RangeAllocation) DeepCopy() *RangeAllocation {
-	if in == nil {
-		return nil
-	}
-	out := new(RangeAllocation)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *RangeAllocation) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
-func (in *ReplicationController) DeepCopy() *ReplicationController {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationController)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ReplicationController) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
-	*out = *in
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
-func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationControllerCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ReplicationController, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
-func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationControllerList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
-	*out = *in
-	if in.Selector != nil {
-		in, out := &in.Selector, &out.Selector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Template != nil {
-		in, out := &in.Template, &out.Template
-		*out = new(PodTemplateSpec)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
-func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationControllerSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]ReplicationControllerCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
-func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationControllerStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
-func (in *ResourceClaim) DeepCopy() *ResourceClaim {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceClaim)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
-	*out = *in
-	out.Divisor = in.Divisor.DeepCopy()
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
-func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceFieldSelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in ResourceList) DeepCopyInto(out *ResourceList) {
-	{
-		in := &in
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-		return
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
-func (in ResourceList) DeepCopy() ResourceList {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceList)
-	in.DeepCopyInto(out)
-	return *out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
-func (in *ResourceQuota) DeepCopy() *ResourceQuota {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuota)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ResourceQuota) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ResourceQuota, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
-func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuotaList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
-	*out = *in
-	if in.Hard != nil {
-		in, out := &in.Hard, &out.Hard
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Scopes != nil {
-		in, out := &in.Scopes, &out.Scopes
-		*out = make([]ResourceQuotaScope, len(*in))
-		copy(*out, *in)
-	}
-	if in.ScopeSelector != nil {
-		in, out := &in.ScopeSelector, &out.ScopeSelector
-		*out = new(ScopeSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
-func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuotaSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
-	*out = *in
-	if in.Hard != nil {
-		in, out := &in.Hard, &out.Hard
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Used != nil {
-		in, out := &in.Used, &out.Used
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
-func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceQuotaStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
-	*out = *in
-	if in.Limits != nil {
-		in, out := &in.Limits, &out.Limits
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Requests != nil {
-		in, out := &in.Requests, &out.Requests
-		*out = make(ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.Claims != nil {
-		in, out := &in.Claims, &out.Claims
-		*out = make([]ResourceClaim, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
-func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceRequirements)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
-func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
-	if in == nil {
-		return nil
-	}
-	out := new(SELinuxOptions)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(SecretReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
-func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ScaleIOPersistentVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
-	*out = *in
-	if in.SecretRef != nil {
-		in, out := &in.SecretRef, &out.SecretRef
-		*out = new(LocalObjectReference)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
-func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ScaleIOVolumeSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
-	*out = *in
-	if in.MatchExpressions != nil {
-		in, out := &in.MatchExpressions, &out.MatchExpressions
-		*out = make([]ScopedResourceSelectorRequirement, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
-func (in *ScopeSelector) DeepCopy() *ScopeSelector {
-	if in == nil {
-		return nil
-	}
-	out := new(ScopeSelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
-	*out = *in
-	if in.Values != nil {
-		in, out := &in.Values, &out.Values
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
-func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
-	if in == nil {
-		return nil
-	}
-	out := new(ScopedResourceSelectorRequirement)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SeccompProfile) DeepCopyInto(out *SeccompProfile) {
-	*out = *in
-	if in.LocalhostProfile != nil {
-		in, out := &in.LocalhostProfile, &out.LocalhostProfile
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeccompProfile.
-func (in *SeccompProfile) DeepCopy() *SeccompProfile {
-	if in == nil {
-		return nil
-	}
-	out := new(SeccompProfile)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Secret) DeepCopyInto(out *Secret) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	if in.Immutable != nil {
-		in, out := &in.Immutable, &out.Immutable
-		*out = new(bool)
-		**out = **in
-	}
-	if in.Data != nil {
-		in, out := &in.Data, &out.Data
-		*out = make(map[string][]byte, len(*in))
-		for key, val := range *in {
-			var outVal []byte
-			if val == nil {
-				(*out)[key] = nil
-			} else {
-				in, out := &val, &outVal
-				*out = make([]byte, len(*in))
-				copy(*out, *in)
-			}
-			(*out)[key] = outVal
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
-func (in *Secret) DeepCopy() *Secret {
-	if in == nil {
-		return nil
-	}
-	out := new(Secret)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Secret) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
-	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		*out = new(bool)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
-func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretEnvSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
-	*out = *in
-	out.LocalObjectReference = in.LocalObjectReference
-	if in.Optional != nil {
-		in, out := &in.Optional, &out.Optional
-		*out = new(bool)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
-func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
-	if in == nil {
-		return nil
-	}
-	out := new(SecretKeySelector)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SecretList) DeepCopyInto(out *SecretList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]Secret, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
-func (in *SecretList) DeepCopy() *SecretList { - if in == nil { - return nil - } - out := new(SecretList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecretList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. -func (in *SecretProjection) DeepCopy() *SecretProjection { - if in == nil { - return nil - } - out := new(SecretProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. -func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { - if in == nil { - return nil - } - out := new(SecretVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.WindowsOptions != nil { - in, out := &in.WindowsOptions, &out.WindowsOptions - *out = new(WindowsSecurityContextOptions) - (*in).DeepCopyInto(*out) - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.ProcMount != nil { - in, out := &in.ProcMount, &out.ProcMount - *out = new(ProcMountType) - **out = **in - } - if in.SeccompProfile != nil { - in, out := &in.SeccompProfile, &out.SeccompProfile - *out = new(SeccompProfile) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. -func (in *SecurityContext) DeepCopy() *SecurityContext { - if in == nil { - return nil - } - out := new(SecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { - *out = *in - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. -func (in *SerializedReference) DeepCopy() *SerializedReference { - if in == nil { - return nil - } - out := new(SerializedReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SerializedReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Service) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. -func (in *ServiceAccount) DeepCopy() *ServiceAccount { - if in == nil { - return nil - } - out := new(ServiceAccount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccount) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. -func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { - if in == nil { - return nil - } - out := new(ServiceAccountList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccountList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. -func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { - if in == nil { - return nil - } - out := new(ServiceAccountTokenProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceList) DeepCopyInto(out *ServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. -func (in *ServiceList) DeepCopy() *ServiceList { - if in == nil { - return nil - } - out := new(ServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
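// DeepCopyObject is the piece that lets generic machinery copy objects it
// only knows as runtime.Object: it forwards to DeepCopy and returns an
// untyped nil when the receiver is nil, so callers never see a non-nil
// interface wrapping a nil pointer. A sketch with a local stand-in for the
// runtime.Object interface (the real one lives in k8s.io/apimachinery):

package main

import "fmt"

// Object stands in for runtime.Object: anything that can deep-copy itself
// behind an interface.
type Object interface {
	DeepCopyObject() Object
}

type AccountLike struct {
	Name    string
	Secrets []string
}

func (in *AccountLike) DeepCopy() *AccountLike {
	if in == nil {
		return nil
	}
	out := &AccountLike{Name: in.Name}
	if in.Secrets != nil {
		out.Secrets = make([]string, len(in.Secrets))
		copy(out.Secrets, in.Secrets)
	}
	return out
}

// DeepCopyObject follows the generated shape: delegate to DeepCopy, and
// return plain nil (not a typed nil) when there is nothing to copy.
func (in *AccountLike) DeepCopyObject() Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

func main() {
	var obj Object = &AccountLike{Name: "default", Secrets: []string{"s1"}}
	copied := obj.DeepCopyObject() // concrete type not needed at the call site
	fmt.Println(copied.(*AccountLike).Name)
}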
-func (in *ServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePort) DeepCopyInto(out *ServicePort) { - *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } - out.TargetPort = in.TargetPort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. -func (in *ServicePort) DeepCopy() *ServicePort { - if in == nil { - return nil - } - out := new(ServicePort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. -func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { - if in == nil { - return nil - } - out := new(ServiceProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ClusterIPs != nil { - in, out := &in.ClusterIPs, &out.ClusterIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IPFamilies != nil { - in, out := &in.IPFamilies, &out.IPFamilies - *out = make([]IPFamily, len(*in)) - copy(*out, *in) - } - if in.IPFamilyPolicy != nil { - in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy - *out = new(IPFamilyPolicy) - **out = **in - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SessionAffinityConfig != nil { - in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllocateLoadBalancerNodePorts != nil { - in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts - *out = new(bool) - **out = **in - } - if in.LoadBalancerClass != nil { - in, out := &in.LoadBalancerClass, &out.LoadBalancerClass - *out = new(string) - **out = **in - } - if in.InternalTrafficPolicy != nil { - in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy - *out = new(ServiceInternalTrafficPolicy) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. 
-func (in *ServiceSpec) DeepCopy() *ServiceSpec { - if in == nil { - return nil - } - out := new(ServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { - *out = *in - if in.ClientIP != nil { - in, out := &in.ClientIP, &out.ClientIP - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. -func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { - if in == nil { - return nil - } - out := new(SessionAffinityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. -func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. -func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sysctl) DeepCopyInto(out *Sysctl) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. -func (in *Sysctl) DeepCopy() *Sysctl { - if in == nil { - return nil - } - out := new(Sysctl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { - *out = *in - out.Port = in.Port - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. -func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { - if in == nil { - return nil - } - out := new(TCPSocketAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in - if in.TimeAdded != nil { - in, out := &in.TimeAdded, &out.TimeAdded - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. -func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { - if in == nil { - return nil - } - out := new(TopologySelectorLabelRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { - *out = *in - if in.MatchLabelExpressions != nil { - in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions - *out = make([]TopologySelectorLabelRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. -func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { - if in == nil { - return nil - } - out := new(TopologySelectorTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.MinDomains != nil { - in, out := &in.MinDomains, &out.MinDomains - *out = new(int32) - **out = **in - } - if in.NodeAffinityPolicy != nil { - in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy - *out = new(NodeInclusionPolicy) - **out = **in - } - if in.NodeTaintsPolicy != nil { - in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy - *out = new(NodeInclusionPolicy) - **out = **in - } - if in.MatchLabelKeys != nil { - in, out := &in.MatchLabelKeys, &out.MatchLabelKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. -func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { - if in == nil { - return nil - } - out := new(TopologySpreadConstraint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { - *out = *in - if in.APIGroup != nil { - in, out := &in.APIGroup, &out.APIGroup - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. -func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { - if in == nil { - return nil - } - out := new(TypedLocalObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) { - *out = *in - if in.APIGroup != nil { - in, out := &in.APIGroup, &out.APIGroup - *out = new(string) - **out = **in - } - if in.Namespace != nil { - in, out := &in.Namespace, &out.Namespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference. -func (in *TypedObjectReference) DeepCopy() *TypedObjectReference { - if in == nil { - return nil - } - out := new(TypedObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. -func (in *VolumeDevice) DeepCopy() *VolumeDevice { - if in == nil { - return nil - } - out := new(VolumeDevice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in - if in.MountPropagation != nil { - in, out := &in.MountPropagation, &out.MountPropagation - *out = new(MountPropagationMode) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { - *out = *in - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. -func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { - if in == nil { - return nil - } - out := new(VolumeNodeAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } - if in.ServiceAccountToken != nil { - in, out := &in.ServiceAccountToken, &out.ServiceAccountToken - *out = new(ServiceAccountTokenProjection) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. -func (in *VolumeProjection) DeepCopy() *VolumeProjection { - if in == nil { - return nil - } - out := new(VolumeProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - **out = **in - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.StorageOS != 
nil { - in, out := &in.StorageOS, &out.StorageOS - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.CSI != nil { - in, out := &in.CSI, &out.CSI - *out = new(CSIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Ephemeral != nil { - in, out := &in.Ephemeral, &out.Ephemeral - *out = new(EphemeralVolumeSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. -func (in *VolumeSource) DeepCopy() *VolumeSource { - if in == nil { - return nil - } - out := new(VolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. -func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { - if in == nil { - return nil - } - out := new(VsphereVirtualDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { - *out = *in - in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. -func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { - if in == nil { - return nil - } - out := new(WeightedPodAffinityTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { - *out = *in - if in.GMSACredentialSpecName != nil { - in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName - *out = new(string) - **out = **in - } - if in.GMSACredentialSpec != nil { - in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec - *out = new(string) - **out = **in - } - if in.RunAsUserName != nil { - in, out := &in.RunAsUserName, &out.RunAsUserName - *out = new(string) - **out = **in - } - if in.HostProcess != nil { - in, out := &in.HostProcess, &out.HostProcess - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. -func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { - if in == nil { - return nil - } - out := new(WindowsSecurityContextOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index b8103223a..b673a6425 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -14,12 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Deprecated: Use functions in k8s.io/utils/ptr instead: ptr.To to obtain +// a pointer, ptr.Deref to dereference a pointer, ptr.Equal to compare +// dereferenced pointers. package pointer import ( - "fmt" - "reflect" "time" + + "k8s.io/utils/ptr" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. 
This is useful when, @@ -28,383 +31,219 @@ import ( // // This function is only valid for structs and pointers to structs. Any other // type will cause a panic. Passing a typed nil pointer will return true. -func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -// Int returns a pointer to an int -func Int(i int) *int { - return &i -} +// +// Deprecated: Use ptr.AllPtrFieldsNil instead. +var AllPtrFieldsNil = ptr.AllPtrFieldsNil + +// Int returns a pointer to an int. +var Int = ptr.To[int] // IntPtr is a function variable referring to Int. // -// Deprecated: Use Int instead. +// Deprecated: Use ptr.To instead. var IntPtr = Int // for back-compat // IntDeref dereferences the int ptr and returns it if not nil, or else // returns def. -func IntDeref(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} +var IntDeref = ptr.Deref[int] // IntPtrDerefOr is a function variable referring to IntDeref. // -// Deprecated: Use IntDeref instead. +// Deprecated: Use ptr.Deref instead. var IntPtrDerefOr = IntDeref // for back-compat // Int32 returns a pointer to an int32. -func Int32(i int32) *int32 { - return &i -} +var Int32 = ptr.To[int32] // Int32Ptr is a function variable referring to Int32. // -// Deprecated: Use Int32 instead. +// Deprecated: Use ptr.To instead. var Int32Ptr = Int32 // for back-compat // Int32Deref dereferences the int32 ptr and returns it if not nil, or else // returns def. -func Int32Deref(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} +var Int32Deref = ptr.Deref[int32] // Int32PtrDerefOr is a function variable referring to Int32Deref. // -// Deprecated: Use Int32Deref instead. +// Deprecated: Use ptr.Deref instead. var Int32PtrDerefOr = Int32Deref // for back-compat // Int32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int32Equal(a, b *int32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int32Equal = ptr.Equal[int32] // Uint returns a pointer to a uint. -func Uint(i uint) *uint { - return &i -} +var Uint = ptr.To[uint] // UintPtr is a function variable referring to Uint. // -// Deprecated: Use Uint instead. +// Deprecated: Use ptr.To instead. var UintPtr = Uint // for back-compat // UintDeref dereferences the uint ptr and returns it if not nil, or else // returns def. -func UintDeref(ptr *uint, def uint) uint { - if ptr != nil { - return *ptr - } - return def -} +var UintDeref = ptr.Deref[uint] // UintPtrDerefOr is a function variable referring to UintDeref. // -// Deprecated: Use UintDeref instead. +// Deprecated: Use ptr.Deref instead. var UintPtrDerefOr = UintDeref // for back-compat // Uint32 returns a pointer to a uint32. -func Uint32(i uint32) *uint32 { - return &i -} +var Uint32 = ptr.To[uint32] // Uint32Ptr is a function variable referring to Uint32. // -// Deprecated: Use Uint32 instead. +// Deprecated: Use ptr.To instead. var Uint32Ptr = Uint32 // for back-compat // Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else // returns def.
-func Uint32Deref(ptr *uint32, def uint32) uint32 { - if ptr != nil { - return *ptr - } - return def -} +var Uint32Deref = ptr.Deref[uint32] // Uint32PtrDerefOr is a function variable referring to Uint32Deref. // -// Deprecated: Use Uint32Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint32PtrDerefOr = Uint32Deref // for back-compat // Uint32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Uint32Equal(a, b *uint32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint32Equal = ptr.Equal[uint32] // Int64 returns a pointer to an int64. -func Int64(i int64) *int64 { - return &i -} +var Int64 = ptr.To[int64] // Int64Ptr is a function variable referring to Int64. // -// Deprecated: Use Int64 instead. +// Deprecated: Use ptr.To instead. var Int64Ptr = Int64 // for back-compat // Int64Deref dereferences the int64 ptr and returns it if not nil, or else // returns def. -func Int64Deref(ptr *int64, def int64) int64 { - if ptr != nil { - return *ptr - } - return def -} +var Int64Deref = ptr.Deref[int64] // Int64PtrDerefOr is a function variable referring to Int64Deref. // -// Deprecated: Use Int64Deref instead. +// Deprecated: Use ptr.Deref instead. var Int64PtrDerefOr = Int64Deref // for back-compat // Int64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Int64Equal(a, b *int64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Int64Equal = ptr.Equal[int64] // Uint64 returns a pointer to a uint64. -func Uint64(i uint64) *uint64 { - return &i -} +var Uint64 = ptr.To[uint64] // Uint64Ptr is a function variable referring to Uint64. // -// Deprecated: Use Uint64 instead. +// Deprecated: Use ptr.To instead. var Uint64Ptr = Uint64 // for back-compat // Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else // returns def. -func Uint64Deref(ptr *uint64, def uint64) uint64 { - if ptr != nil { - return *ptr - } - return def -} +var Uint64Deref = ptr.Deref[uint64] // Uint64PtrDerefOr is a function variable referring to Uint64Deref. // -// Deprecated: Use Uint64Deref instead. +// Deprecated: Use ptr.Deref instead. var Uint64PtrDerefOr = Uint64Deref // for back-compat // Uint64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Uint64Equal(a, b *uint64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Uint64Equal = ptr.Equal[uint64] // Bool returns a pointer to a bool. -func Bool(b bool) *bool { - return &b -} +var Bool = ptr.To[bool] // BoolPtr is a function variable referring to Bool. // -// Deprecated: Use Bool instead. +// Deprecated: Use ptr.To instead. var BoolPtr = Bool // for back-compat // BoolDeref dereferences the bool ptr and returns it if not nil, or else // returns def. -func BoolDeref(ptr *bool, def bool) bool { - if ptr != nil { - return *ptr - } - return def -} +var BoolDeref = ptr.Deref[bool] // BoolPtrDerefOr is a function variable referring to BoolDeref. // -// Deprecated: Use BoolDeref instead. +// Deprecated: Use ptr.Deref instead. var BoolPtrDerefOr = BoolDeref // for back-compat // BoolEqual returns true if both arguments are nil or both arguments // dereference to the same value.
-func BoolEqual(a, b *bool) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var BoolEqual = ptr.Equal[bool] // String returns a pointer to a string. -func String(s string) *string { - return &s -} +var String = ptr.To[string] // StringPtr is a function variable referring to String. // -// Deprecated: Use String instead. +// Deprecated: Use ptr.To instead. var StringPtr = String // for back-compat // StringDeref dereferences the string ptr and returns it if not nil, or else // returns def. -func StringDeref(ptr *string, def string) string { - if ptr != nil { - return *ptr - } - return def -} +var StringDeref = ptr.Deref[string] // StringPtrDerefOr is a function variable referring to StringDeref. // -// Deprecated: Use StringDeref instead. +// Deprecated: Use ptr.Deref instead. var StringPtrDerefOr = StringDeref // for back-compat // StringEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func StringEqual(a, b *string) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var StringEqual = ptr.Equal[string] // Float32 returns a pointer to a float32. -func Float32(i float32) *float32 { - return &i -} +var Float32 = ptr.To[float32] // Float32Ptr is a function variable referring to Float32. // -// Deprecated: Use Float32 instead. +// Deprecated: Use ptr.To instead. var Float32Ptr = Float32 // Float32Deref dereferences the float32 ptr and returns it if not nil, or else // returns def. -func Float32Deref(ptr *float32, def float32) float32 { - if ptr != nil { - return *ptr - } - return def -} +var Float32Deref = ptr.Deref[float32] // Float32PtrDerefOr is a function variable referring to Float32Deref. // -// Deprecated: Use Float32Deref instead. +// Deprecated: Use ptr.Deref instead. var Float32PtrDerefOr = Float32Deref // for back-compat // Float32Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float32Equal(a, b *float32) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float32Equal = ptr.Equal[float32] // Float64 returns a pointer to a float64. -func Float64(i float64) *float64 { - return &i -} +var Float64 = ptr.To[float64] // Float64Ptr is a function variable referring to Float64. // -// Deprecated: Use Float64 instead. +// Deprecated: Use ptr.To instead. var Float64Ptr = Float64 // Float64Deref dereferences the float64 ptr and returns it if not nil, or else // returns def. -func Float64Deref(ptr *float64, def float64) float64 { - if ptr != nil { - return *ptr - } - return def -} +var Float64Deref = ptr.Deref[float64] // Float64PtrDerefOr is a function variable referring to Float64Deref. // -// Deprecated: Use Float64Deref instead. +// Deprecated: Use ptr.Deref instead. var Float64PtrDerefOr = Float64Deref // for back-compat // Float64Equal returns true if both arguments are nil or both arguments // dereference to the same value. -func Float64Equal(a, b *float64) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var Float64Equal = ptr.Equal[float64] // Duration returns a pointer to a time.Duration. -func Duration(d time.Duration) *time.Duration { - return &d -} +var Duration = ptr.To[time.Duration] // DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else // returns def. 
-func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { - if ptr != nil { - return *ptr - } - return def -} +var DurationDeref = ptr.Deref[time.Duration] // DurationEqual returns true if both arguments are nil or both arguments // dereference to the same value. -func DurationEqual(a, b *time.Duration) bool { - if (a == nil) != (b == nil) { - return false - } - if a == nil { - return true - } - return *a == *b -} +var DurationEqual = ptr.Equal[time.Duration] diff --git a/vendor/k8s.io/utils/ptr/OWNERS b/vendor/k8s.io/utils/ptr/OWNERS new file mode 100644 index 000000000..0d6392752 --- /dev/null +++ b/vendor/k8s.io/utils/ptr/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/k8s.io/utils/ptr/README.md b/vendor/k8s.io/utils/ptr/README.md new file mode 100644 index 000000000..2ca8073dc --- /dev/null +++ b/vendor/k8s.io/utils/ptr/README.md @@ -0,0 +1,3 @@ +# Pointer + +This package provides some functions for pointer-based operations. diff --git a/vendor/k8s.io/utils/ptr/ptr.go b/vendor/k8s.io/utils/ptr/ptr.go new file mode 100644 index 000000000..659ed3b9e --- /dev/null +++ b/vendor/k8s.io/utils/ptr/ptr.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// To returns a pointer to the given value. +func To[T any](v T) *T { + return &v +} + +// Deref dereferences ptr and returns the value it points to if not nil, or else +// returns def. +func Deref[T any](ptr *T, def T) T { + if ptr != nil { + return *ptr + } + return def +} + +// Equal returns true if both arguments are nil or both arguments +// dereference to the same value.
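// Taken together, To, Deref, and the generic Equal defined next replace every
// type-specific helper that the pointer package used to hand-roll. A short
// usage sketch against the k8s.io/utils/ptr API added by this patch:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To subsumes pointer.Int32, pointer.String, pointer.Bool, ...
	replicas := ptr.To[int32](3)

	// ptr.Deref subsumes pointer.Int32Deref and friends: a nil-safe read
	// with a caller-supplied default.
	var unset *int32
	fmt.Println(ptr.Deref(replicas, 1)) // 3
	fmt.Println(ptr.Deref(unset, 1))    // 1

	// ptr.Equal subsumes pointer.Int32Equal and friends: two nils are
	// equal, one nil is not.
	fmt.Println(ptr.Equal(replicas, ptr.To[int32](3))) // true
	fmt.Println(ptr.Equal(replicas, unset))            // false
}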
+func Equal[T comparable](a, b *T) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 655f3bae3..c2b0d6085 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -183,10 +183,11 @@ github.com/go-errors/errors # github.com/go-gorp/gorp/v3 v3.1.0 ## explicit; go 1.18 github.com/go-gorp/gorp/v3 -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 +# github.com/go-logr/logr v1.3.0 +## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr +github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -394,7 +395,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.28.1 +# github.com/onsi/gomega v1.29.0 ## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format @@ -498,7 +499,7 @@ github.com/xlab/treeprint # github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 ## explicit github.com/xrash/smetrics -# go.opentelemetry.io/otel v1.19.0 +# go.opentelemetry.io/otel v1.20.0 ## explicit; go 1.20 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -512,13 +513,14 @@ go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/internal/v2 go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.17.0/httpconv -# go.opentelemetry.io/otel/metric v1.19.0 +# go.opentelemetry.io/otel/metric v1.20.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.19.0 +# go.opentelemetry.io/otel/trace v1.20.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace +go.opentelemetry.io/otel/trace/embedded # go.starlark.net v0.0.0-20230525235612-a134d8f9ddca ## explicit; go 1.16 go.starlark.net/internal/compile @@ -527,7 +529,7 @@ go.starlark.net/resolve go.starlark.net/starlark go.starlark.net/starlarkstruct go.starlark.net/syntax -# golang.org/x/crypto v0.15.0 +# golang.org/x/crypto v0.17.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -541,10 +543,10 @@ golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt -# golang.org/x/mod v0.13.0 +# golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.18.0 +# golang.org/x/net v0.19.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html @@ -558,21 +560,21 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.11.0 +# golang.org/x/oauth2 v0.14.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.4.0 -## explicit; go 1.17 +# golang.org/x/sync v0.5.0 +## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.14.1-0.20231113162313-11eadc05e9bf +# golang.org/x/sys v0.15.0 ## explicit; go 1.18 golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.14.0 +# golang.org/x/term v0.15.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -597,14 +599,14 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.3.0 -## explicit +# 
golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.14.0 +# golang.org/x/tools v0.16.0 ## explicit; go 1.18 golang.org/x/tools/go/ast/inspector golang.org/x/tools/internal/typeparams -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine/internal google.golang.org/appengine/internal/base @@ -613,7 +615,7 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.59.0 @@ -749,7 +751,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.28.3 => k8s.io/api v0.28.3 +# k8s.io/api v0.29.0 => k8s.io/api v0.28.3 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -806,7 +808,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.28.2 => k8s.io/apiextensions-apiserver v0.28.3 +# k8s.io/apiextensions-apiserver v0.29.0 => k8s.io/apiextensions-apiserver v0.28.3 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -817,7 +819,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.28.3 => k8s.io/apimachinery v0.28.3 +# k8s.io/apimachinery v0.29.0 => k8s.io/apimachinery v0.28.3 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -876,7 +878,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.28.3 => k8s.io/apiserver v0.28.3 +# k8s.io/apiserver v0.29.0 => k8s.io/apiserver v0.28.3 ## explicit; go 1.20 k8s.io/apiserver/pkg/endpoints/deprecation # k8s.io/cli-runtime v0.28.3 => k8s.io/cli-runtime v0.28.3 @@ -885,7 +887,7 @@ k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.28.3 => k8s.io/client-go v0.28.3 +# k8s.io/client-go v0.29.0 => k8s.io/client-go v0.28.3 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1035,10 +1037,10 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.28.3 => k8s.io/component-base v0.28.3 +# k8s.io/component-base v0.29.0 => k8s.io/component-base v0.28.3 ## explicit; go 1.20 k8s.io/component-base/version -# k8s.io/klog/v2 v2.100.1 +# k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -1046,22 +1048,20 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg 
k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 +k8s.io/klog/v2/internal/sloghandler +# k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 -k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json -k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv -k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.28.2 => k8s.io/kubectl v0.28.3 +# k8s.io/kubectl v0.29.0 => k8s.io/kubectl v0.28.3 ## explicit; go 1.20 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme @@ -1071,14 +1071,10 @@ k8s.io/kubectl/pkg/util/openapi k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.28.2 => k8s.io/kubelet v0.28.3 +# k8s.io/kubelet v0.29.0 => k8s.io/kubelet v0.28.3 ## explicit; go 1.20 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 -# k8s.io/kubernetes v1.28.3 -## explicit; go 1.20 -k8s.io/kubernetes/pkg/apis/core -k8s.io/kubernetes/pkg/apis/core/helper -# k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 +# k8s.io/utils v0.0.0-20230726121419-3b25d923346b ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1088,6 +1084,7 @@ k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/pointer +k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace # oras.land/oras-go v1.2.4 @@ -1191,15 +1188,13 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/node-feature-discovery v0.14.2 -## explicit; go 1.20 +# sigs.k8s.io/node-feature-discovery v0.15.1 +## explicit; go 1.21 sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1 sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned/scheme sigs.k8s.io/node-feature-discovery/pkg/generated/clientset/versioned/typed/nfd/v1alpha1 -sigs.k8s.io/node-feature-discovery/pkg/utils -sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath -# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/annotations_labels.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/annotations_labels.go index 3ed45beb0..b1f848245 100644 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/annotations_labels.go +++ b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/annotations_labels.go @@ -51,6 +51,7 @@ const ( FeatureLabelsAnnotation = AnnotationNs + "/feature-labels" // MasterVersionAnnotation is the annotation that holds the version of nfd-master running on the node + // DEPRECATED: will not be used in NFD v0.15 or later. 
MasterVersionAnnotation = AnnotationNs + "/master.version" // WorkerVersionAnnotation is the annotation that holds the version of nfd-worker running on the node @@ -59,9 +60,18 @@ const ( // NodeTaintsAnnotation is the annotation that holds the taints that nfd-master set on the node NodeTaintsAnnotation = AnnotationNs + "/taints" + // FeatureAnnotationsTrackingAnnotation is the annotation that holds all feature annotations that nfd-master set on the node + FeatureAnnotationsTrackingAnnotation = AnnotationNs + "/feature-annotations" + // NodeFeatureObjNodeNameLabel is the label that specifies which node the // NodeFeature object is targeting. Creators of NodeFeature objects must // set this label and consumers of the objects are supposed to use the // label for filtering features designated for a certain node. NodeFeatureObjNodeNameLabel = "nfd.node.kubernetes.io/node-name" + + // FeatureAnnotationNs is the (default) namespace for feature annotations. + FeatureAnnotationNs = "feature.node.kubernetes.io" + + // FeatureAnnotationSubNsSuffix is the suffix for allowed feature annotation sub-namespaces. + FeatureAnnotationSubNsSuffix = "." + FeatureAnnotationNs ) diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/expression.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/expression.go deleted file mode 100644 index febf5faa5..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/expression.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "encoding/json" - "fmt" - "regexp" - "sort" - "strconv" - "strings" - - "k8s.io/klog/v2" -) - -var matchOps = map[MatchOp]struct{}{ - MatchAny: {}, - MatchIn: {}, - MatchNotIn: {}, - MatchInRegexp: {}, - MatchExists: {}, - MatchDoesNotExist: {}, - MatchGt: {}, - MatchLt: {}, - MatchGtLt: {}, - MatchIsTrue: {}, - MatchIsFalse: {}, -} - -type valueRegexpCache []*regexp.Regexp - -// CreateMatchExpression creates a new MatchExpression instance. Returns an -// error if validation fails. -func CreateMatchExpression(op MatchOp, values ...string) (*MatchExpression, error) { - m := newMatchExpression(op, values...) - return m, m.Validate() -} - -// MustCreateMatchExpression creates a new MatchExpression instance. Panics if -// validation fails. -func MustCreateMatchExpression(op MatchOp, values ...string) *MatchExpression { - m, err := CreateMatchExpression(op, values...) - if err != nil { - panic(err) - } - return m -} - -// newMatchExpression returns a new MatchExpression instance. -func newMatchExpression(op MatchOp, values ...string) *MatchExpression { - return &MatchExpression{ - Op: op, - Value: values, - } -} - -// Validate validates the expression. 
-func (m *MatchExpression) Validate() error { - m.valueRe = nil - - if _, ok := matchOps[m.Op]; !ok { - return fmt.Errorf("invalid Op %q", m.Op) - } - switch m.Op { - case MatchExists, MatchDoesNotExist, MatchIsTrue, MatchIsFalse, MatchAny: - if len(m.Value) != 0 { - return fmt.Errorf("value must be empty for Op %q (have %v)", m.Op, m.Value) - } - case MatchGt, MatchLt: - if len(m.Value) != 1 { - return fmt.Errorf("value must contain exactly one element for Op %q (have %v)", m.Op, m.Value) - } - if _, err := strconv.Atoi(m.Value[0]); err != nil { - return fmt.Errorf("value must be an integer for Op %q (have %v)", m.Op, m.Value[0]) - } - case MatchGtLt: - if len(m.Value) != 2 { - return fmt.Errorf("value must contain exactly two elements for Op %q (have %v)", m.Op, m.Value) - } - var err error - v := make([]int, 2) - for i := 0; i < 2; i++ { - if v[i], err = strconv.Atoi(m.Value[i]); err != nil { - return fmt.Errorf("value must contain integers for Op %q (have %v)", m.Op, m.Value) - } - } - if v[0] >= v[1] { - return fmt.Errorf("value[0] must be less than Value[1] for Op %q (have %v)", m.Op, m.Value) - } - case MatchInRegexp: - if len(m.Value) == 0 { - return fmt.Errorf("value must be non-empty for Op %q", m.Op) - } - m.valueRe = make([]*regexp.Regexp, len(m.Value)) - for i, v := range m.Value { - re, err := regexp.Compile(v) - if err != nil { - return fmt.Errorf("value must only contain valid regexps for Op %q (have %v)", m.Op, m.Value) - } - m.valueRe[i] = re - } - default: - if len(m.Value) == 0 { - return fmt.Errorf("value must be non-empty for Op %q", m.Op) - } - } - return nil -} - -// Match evaluates the MatchExpression against a single input value. -func (m *MatchExpression) Match(valid bool, value interface{}) (bool, error) { - switch m.Op { - case MatchAny: - return true, nil - case MatchExists: - return valid, nil - case MatchDoesNotExist: - return !valid, nil - } - - if valid { - value := fmt.Sprintf("%v", value) - switch m.Op { - case MatchIn: - for _, v := range m.Value { - if value == v { - return true, nil - } - } - case MatchNotIn: - for _, v := range m.Value { - if value == v { - return false, nil - } - } - return true, nil - case MatchInRegexp: - if m.valueRe == nil { - return false, fmt.Errorf("BUG: MatchExpression has not been initialized properly, regexps missing") - } - for _, re := range m.valueRe { - if re.MatchString(value) { - return true, nil - } - } - case MatchGt, MatchLt: - l, err := strconv.Atoi(value) - if err != nil { - return false, fmt.Errorf("not a number %q", value) - } - r, err := strconv.Atoi(m.Value[0]) - if err != nil { - return false, fmt.Errorf("not a number %q in %v", m.Value[0], m) - } - - if (l < r && m.Op == MatchLt) || (l > r && m.Op == MatchGt) { - return true, nil - } - case MatchGtLt: - v, err := strconv.Atoi(value) - if err != nil { - return false, fmt.Errorf("not a number %q", value) - } - lr := make([]int, 2) - for i := 0; i < 2; i++ { - lr[i], err = strconv.Atoi(m.Value[i]) - if err != nil { - return false, fmt.Errorf("not a number %q in %v", m.Value[i], m) - } - } - return v > lr[0] && v < lr[1], nil - case MatchIsTrue: - return value == "true", nil - case MatchIsFalse: - return value == "false", nil - default: - return false, fmt.Errorf("unsupported Op %q", m.Op) - } - } - return false, nil -} - -// MatchKeys evaluates the MatchExpression against a set of keys. 
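To make Match's contract concrete, another small editorial sketch (same deleted v0.14 API; assumes the import shown in the previous sketch): the boolean argument reports whether the input value exists at all, and an absent input can only be matched by MatchExists, MatchDoesNotExist or MatchAny.

func exampleMatch() {
	me := nfdv1alpha1.MustCreateMatchExpression(nfdv1alpha1.MatchGt, "4096")

	// The input is stringified with fmt.Sprintf("%v") and re-parsed as an
	// integer for the numeric operators.
	matched, err := me.Match(true, 8192) // true, nil: 8192 > 4096
	fmt.Println(matched, err)

	// Here the input does not exist, and MatchGt cannot match non-existence.
	matched, _ = me.Match(false, nil) // false
	fmt.Println(matched)
}

MatchKeys, next, applies the existence-style subset of these operators to bare keys.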
-func (m *MatchExpression) MatchKeys(name string, keys map[string]Nil) (bool, error) {
-	matched := false
-
-	_, ok := keys[name]
-	switch m.Op {
-	case MatchAny:
-		matched = true
-	case MatchExists:
-		matched = ok
-	case MatchDoesNotExist:
-		matched = !ok
-	default:
-		return false, fmt.Errorf("invalid Op %q when matching keys", m.Op)
-	}
-
-	if klogV := klog.V(3); klogV.Enabled() {
-		klogV.InfoS("matched keys", "matchResult", "matched", "matchKey", name, "matchOp", m.Op)
-	} else if klogV := klog.V(4); klogV.Enabled() {
-		k := make([]string, 0, len(keys))
-		for n := range keys {
-			k = append(k, n)
-		}
-		sort.Strings(k)
-		klogV.InfoS("matched keys", "matchResult", "matched", "matchKey", name, "matchOp", m.Op, "inputKeys", k)
-	}
-	return matched, nil
-}
-
-// MatchValues evaluates the MatchExpression against a set of key-value pairs.
-func (m *MatchExpression) MatchValues(name string, values map[string]string) (bool, error) {
-	v, ok := values[name]
-	matched, err := m.Match(ok, v)
-	if err != nil {
-		return false, err
-	}
-
-	if klogV := klog.V(3); klogV.Enabled() {
-		klogV.InfoS("matched values", "matchResult", "matched", "matchKey", name, "matchOp", m.Op, "matchValue", m.Value)
-	} else if klogV := klog.V(4); klogV.Enabled() {
-		klogV.InfoS("matched values", "matchResult", "matched", "matchKey", name, "matchOp", m.Op, "matchValue", m.Value, "inputValues", values)
-	}
-
-	return matched, nil
-}
-
-// matchExpression is a helper type for unmarshalling MatchExpression
-type matchExpression MatchExpression
-
-// UnmarshalJSON implements the Unmarshaler interface of "encoding/json"
-func (m *MatchExpression) UnmarshalJSON(data []byte) error {
-	raw := new(interface{})
-
-	err := json.Unmarshal(data, raw)
-	if err != nil {
-		return err
-	}
-
-	switch v := (*raw).(type) {
-	case string:
-		*m = *newMatchExpression(MatchIn, v)
-	case bool:
-		*m = *newMatchExpression(MatchIn, strconv.FormatBool(v))
-	case float64:
-		*m = *newMatchExpression(MatchIn, strconv.FormatFloat(v, 'f', -1, 64))
-	case []interface{}:
-		values := make([]string, len(v))
-		for i, value := range v {
-			str, ok := value.(string)
-			if !ok {
-				return fmt.Errorf("invalid value %v in %v", value, v)
-			}
-			values[i] = str
-		}
-		*m = *newMatchExpression(MatchIn, values...)
-	case map[string]interface{}:
-		helper := &matchExpression{}
-		if err := json.Unmarshal(data, &helper); err != nil {
-			return err
-		}
-		*m = *newMatchExpression(helper.Op, helper.Value...)
-	default:
-		return fmt.Errorf("invalid rule '%v' (%T)", v, v)
-	}
-
-	return m.Validate()
-}
-
-// MatchKeys evaluates the MatchExpressionSet against a set of keys.
-func (m *MatchExpressionSet) MatchKeys(keys map[string]Nil) (bool, error) {
-	matched, _, err := m.MatchGetKeys(keys)
-	return matched, err
-}
-
-// MatchedKey holds one matched key.
-type MatchedKey struct {
-	Name string
-}
-
-// MatchGetKeys evaluates the MatchExpressionSet against a set of keys and
-// returns all matched keys, or nil if no match was found. As a special case,
-// an empty MatchExpressionSet matches and returns all existing keys. Note that
-// an empty MatchExpressionSet and an empty set of keys yield an empty slice,
-// which is not nil and is treated as a match.
-func (m *MatchExpressionSet) MatchGetKeys(keys map[string]Nil) (bool, []MatchedKey, error) { - ret := make([]MatchedKey, 0, len(*m)) - - for n, e := range *m { - match, err := e.MatchKeys(n, keys) - if err != nil { - return false, nil, err - } - if !match { - return false, nil, nil - } - ret = append(ret, MatchedKey{Name: n}) - } - // Sort for reproducible output - sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) - return true, ret, nil -} - -// MatchValues evaluates the MatchExpressionSet against a set of key-value pairs. -func (m *MatchExpressionSet) MatchValues(values map[string]string) (bool, error) { - matched, _, err := m.MatchGetValues(values) - return matched, err -} - -// MatchedValue holds one matched key-value pair. -type MatchedValue struct { - Name string - Value string -} - -// MatchGetValues evaluates the MatchExpressionSet against a set of key-value -// pairs and returns all matched key-value pairs. Special case of an empty -// MatchExpressionSet returns all existing key-value pairs. Note that an empty -// MatchExpressionSet and an empty set of values returns an empty non-nil map -// which is treated as a match. -func (m *MatchExpressionSet) MatchGetValues(values map[string]string) (bool, []MatchedValue, error) { - ret := make([]MatchedValue, 0, len(*m)) - - for n, e := range *m { - match, err := e.MatchValues(n, values) - if err != nil { - return false, nil, err - } - if !match { - return false, nil, nil - } - ret = append(ret, MatchedValue{Name: n, Value: values[n]}) - } - // Sort for reproducible output - sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) - return true, ret, nil -} - -// MatchInstances evaluates the MatchExpressionSet against a set of instance -// features, each of which is an individual set of key-value pairs -// (attributes). -func (m *MatchExpressionSet) MatchInstances(instances []InstanceFeature) (bool, error) { - v, err := m.MatchGetInstances(instances) - return len(v) > 0, err -} - -// MatchedInstance holds one matched Instance. -type MatchedInstance map[string]string - -// MatchGetInstances evaluates the MatchExpressionSet against a set of instance -// features, each of which is an individual set of key-value pairs -// (attributes). A slice containing all matching instances is returned. An -// empty (non-nil) slice is returned if no matching instances were found. -func (m *MatchExpressionSet) MatchGetInstances(instances []InstanceFeature) ([]MatchedInstance, error) { - ret := []MatchedInstance{} - - for _, i := range instances { - if match, err := m.MatchValues(i.Attributes); err != nil { - return nil, err - } else if match { - ret = append(ret, i.Attributes) - } - } - return ret, nil -} - -// UnmarshalJSON implements the Unmarshaler interface of "encoding/json". 
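The UnmarshalJSON implementation that follows accepts two encodings for a MatchExpressionSet; a short editorial sketch with hypothetical inputs (assumes encoding/json and the package import used in the sketches above):

func decodeSets() error {
	var short, full nfdv1alpha1.MatchExpressionSet

	// Simplified slice form: a bare name becomes MatchExists, while a
	// "name=value" pair becomes MatchIn with that single value.
	if err := json.Unmarshal([]byte(`["AVX512F", "vendor=GenuineIntel"]`), &short); err != nil {
		return err
	}

	// Full map form: an explicit expression per key.
	return json.Unmarshal([]byte(`{"class": {"op": "In", "value": ["0300"]}}`), &full)
}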
-func (m *MatchExpressionSet) UnmarshalJSON(data []byte) error {
-	*m = MatchExpressionSet{}
-
-	names := make([]string, 0)
-	if err := json.Unmarshal(data, &names); err == nil {
-		// Simplified slice form
-		for _, name := range names {
-			split := strings.SplitN(name, "=", 2)
-			if len(split) == 1 {
-				(*m)[split[0]] = newMatchExpression(MatchExists)
-			} else {
-				(*m)[split[0]] = newMatchExpression(MatchIn, split[1])
-			}
-		}
-	} else {
-		// Unmarshal the full map form
-		expressions := make(map[string]*MatchExpression)
-		if err := json.Unmarshal(data, &expressions); err != nil {
-			return err
-		}
-		for k, v := range expressions {
-			if v != nil {
-				(*m)[k] = v
-			} else {
-				(*m)[k] = newMatchExpression(MatchExists)
-			}
-		}
-	}
-
-	return nil
-}
-
-// UnmarshalJSON implements the Unmarshaler interface of "encoding/json".
-func (m *MatchOp) UnmarshalJSON(data []byte) error {
-	var raw string
-
-	if err := json.Unmarshal(data, &raw); err != nil {
-		return err
-	}
-
-	if _, ok := matchOps[MatchOp(raw)]; !ok {
-		return fmt.Errorf("invalid Op %q", raw)
-	}
-	*m = MatchOp(raw)
-	return nil
-}
-
-// UnmarshalJSON implements the Unmarshaler interface of "encoding/json".
-func (m *MatchValue) UnmarshalJSON(data []byte) error {
-	var raw interface{}
-
-	if err := json.Unmarshal(data, &raw); err != nil {
-		return err
-	}
-
-	switch v := raw.(type) {
-	case string:
-		*m = []string{v}
-	case bool:
-		*m = []string{strconv.FormatBool(v)}
-	case float64:
-		*m = []string{strconv.FormatFloat(v, 'f', -1, 64)}
-	case []interface{}:
-		values := make([]string, len(v))
-		for i, value := range v {
-			str, ok := value.(string)
-			if !ok {
-				return fmt.Errorf("invalid value %v in %v", value, v)
-			}
-			values[i] = str
-		}
-		*m = values
-	default:
-		return fmt.Errorf("invalid values '%v' (%T)", v, v)
-	}
-
-	return nil
-}
-
-// DeepCopy supplements the auto-generated code
-func (in *valueRegexpCache) DeepCopy() *valueRegexpCache {
-	if in == nil {
-		return nil
-	}
-	out := new(valueRegexpCache)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is a stub to augment the auto-generated code
-//
-//nolint:staticcheck // re.Copy is deprecated but we want to use it here
-func (in *valueRegexpCache) DeepCopyInto(out *valueRegexpCache) {
-	*out = make(valueRegexpCache, len(*in))
-	for i, re := range *in {
-		(*out)[i] = re.Copy()
-	}
-}
diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/feature.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/feature.go
index b3a9b1d5d..a3d066bd4 100644
--- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/feature.go
+++ b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/feature.go
@@ -16,6 +16,8 @@ limitations under the License.
 
 package v1alpha1
 
+import "maps"
+
 // NewNodeFeatureSpec creates a new empty instance of NodeFeatureSpec type,
 // initializing all fields to proper empty values.
 func NewNodeFeatureSpec() *NodeFeatureSpec {
@@ -75,9 +77,7 @@ func (f *Features) InsertAttributeFeatures(domain, feature string, values map[st
 		return
 	}
 
-	for k, v := range values {
-		f.Attributes[key].Elements[k] = v
-	}
+	maps.Copy(f.Attributes[key].Elements, values)
 }
 
 // Exists returns a non-empty string if a feature exists. The return value is
@@ -103,9 +103,7 @@ func (in *NodeFeatureSpec) MergeInto(out *NodeFeatureSpec) {
 		if out.Labels == nil {
 			out.Labels = make(map[string]string, len(in.Labels))
 		}
-		for key, val := range in.Labels {
-			out.Labels[key] = val
-		}
+		maps.Copy(out.Labels, in.Labels)
 	}
 }
 
@@ -151,9 +149,7 @@ func (in *FlagFeatureSet) MergeInto(out *FlagFeatureSet) {
 		if out.Elements == nil {
 			out.Elements = make(map[string]Nil, len(in.Elements))
 		}
-		for key, val := range in.Elements {
-			out.Elements[key] = val
-		}
+		maps.Copy(out.Elements, in.Elements)
 	}
 }
 
@@ -163,9 +159,7 @@ func (in *AttributeFeatureSet) MergeInto(out *AttributeFeatureSet) {
 		if out.Elements == nil {
 			out.Elements = make(map[string]string, len(in.Elements))
 		}
-		for key, val := range in.Elements {
-			out.Elements[key] = val
-		}
+		maps.Copy(out.Elements, in.Elements)
 	}
 }
diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/rule.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/rule.go
deleted file mode 100644
index 6bea00dee..000000000
--- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/rule.go
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"bytes"
-	"fmt"
-	"strings"
-	"text/template"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/klog/v2"
-	"sigs.k8s.io/node-feature-discovery/pkg/utils"
-)
-
-// RuleOutput contains the output of rule execution.
-// +k8s:deepcopy-gen=false
-type RuleOutput struct {
-	ExtendedResources map[string]string
-	Labels            map[string]string
-	Vars              map[string]string
-	Taints            []corev1.Taint
-}
-
-// Execute the rule against a set of input features.
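Before the implementation, a rough editorial sketch of how a rule is driven (pre-bump v0.14 API; the rule name, label and feature values are hypothetical):

func runRule(features *nfdv1alpha1.Features) (nfdv1alpha1.RuleOutput, error) {
	rule := nfdv1alpha1.Rule{
		Name:   "example-avx512-rule",
		Labels: map[string]string{"example.com/avx512": "true"},
		MatchFeatures: nfdv1alpha1.FeatureMatcher{
			{
				Feature: "cpu.cpuid",
				MatchExpressions: nfdv1alpha1.MatchExpressionSet{
					"AVX512F": nfdv1alpha1.MustCreateMatchExpression(nfdv1alpha1.MatchExists),
				},
			},
		},
	}
	// Execute returns the labels, vars, extended resources and taints the
	// rule produces; features would come from NFD's discovery step.
	return rule.Execute(features)
}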
-func (r *Rule) Execute(features *Features) (RuleOutput, error) { - extendedResources := make(map[string]string) - labels := make(map[string]string) - vars := make(map[string]string) - - if len(r.MatchAny) > 0 { - // Logical OR over the matchAny matchers - matched := false - for _, matcher := range r.MatchAny { - if isMatch, matches, err := matcher.match(features); err != nil { - return RuleOutput{}, err - } else if isMatch { - matched = true - klog.V(4).InfoS("matchAny matched", "ruleName", r.Name, "matchedFeatures", utils.DelayedDumper(matches)) - - if r.LabelsTemplate == "" && r.VarsTemplate == "" { - // there's no need to evaluate other matchers in MatchAny - // if there are no templates to be executed on them - so - // short-circuit and stop on first match here - break - } - - if err := r.executeLabelsTemplate(matches, labels); err != nil { - return RuleOutput{}, err - } - if err := r.executeVarsTemplate(matches, vars); err != nil { - return RuleOutput{}, err - } - } - } - if !matched { - klog.V(2).InfoS("rule did not match", "ruleName", r.Name) - return RuleOutput{}, nil - } - } - - if len(r.MatchFeatures) > 0 { - if isMatch, matches, err := r.MatchFeatures.match(features); err != nil { - return RuleOutput{}, err - } else if !isMatch { - klog.V(2).InfoS("rule did not match", "ruleName", r.Name) - return RuleOutput{}, nil - } else { - klog.V(4).InfoS("matchFeatures matched", "ruleName", r.Name, "matchedFeatures", utils.DelayedDumper(matches)) - if err := r.executeLabelsTemplate(matches, labels); err != nil { - return RuleOutput{}, err - } - if err := r.executeVarsTemplate(matches, vars); err != nil { - return RuleOutput{}, err - } - } - } - - for k, v := range r.ExtendedResources { - extendedResources[k] = v - } - - for k, v := range r.Labels { - labels[k] = v - } - for k, v := range r.Vars { - vars[k] = v - } - - ret := RuleOutput{ExtendedResources: extendedResources, Labels: labels, Vars: vars, Taints: r.Taints} - klog.V(2).InfoS("rule matched", "ruleName", r.Name, "ruleOutput", utils.DelayedDumper(ret)) - return ret, nil -} - -func (r *Rule) executeLabelsTemplate(in matchedFeatures, out map[string]string) error { - if r.LabelsTemplate == "" { - return nil - } - - if r.labelsTemplate == nil { - t, err := newTemplateHelper(r.LabelsTemplate) - if err != nil { - return fmt.Errorf("failed to parse LabelsTemplate: %w", err) - } - r.labelsTemplate = t - } - - labels, err := r.labelsTemplate.expandMap(in) - if err != nil { - return fmt.Errorf("failed to expand LabelsTemplate: %w", err) - } - for k, v := range labels { - out[k] = v - } - return nil -} - -func (r *Rule) executeVarsTemplate(in matchedFeatures, out map[string]string) error { - if r.VarsTemplate == "" { - return nil - } - if r.varsTemplate == nil { - t, err := newTemplateHelper(r.VarsTemplate) - if err != nil { - return err - } - r.varsTemplate = t - } - - vars, err := r.varsTemplate.expandMap(in) - if err != nil { - return err - } - for k, v := range vars { - out[k] = v - } - return nil -} - -type matchedFeatures map[string]domainMatchedFeatures - -type domainMatchedFeatures map[string]interface{} - -func (e *MatchAnyElem) match(features *Features) (bool, matchedFeatures, error) { - return e.MatchFeatures.match(features) -} - -func (m *FeatureMatcher) match(features *Features) (bool, matchedFeatures, error) { - matches := make(matchedFeatures, len(*m)) - - // Logical AND over the terms - for _, term := range *m { - // Ignore case - featureName := strings.ToLower(term.Feature) - - nameSplit := strings.SplitN(term.Feature, ".", 
2)
-		if len(nameSplit) != 2 {
-			klog.InfoS("invalid feature name (not <domain>.<feature>), cannot be used for templating", "featureName", term.Feature)
-			nameSplit = []string{featureName, ""}
-		}
-
-		if _, ok := matches[nameSplit[0]]; !ok {
-			matches[nameSplit[0]] = make(domainMatchedFeatures)
-		}
-
-		var isMatch bool
-		var err error
-		if f, ok := features.Flags[featureName]; ok {
-			m, v, e := term.MatchExpressions.MatchGetKeys(f.Elements)
-			isMatch = m
-			err = e
-			matches[nameSplit[0]][nameSplit[1]] = v
-		} else if f, ok := features.Attributes[featureName]; ok {
-			m, v, e := term.MatchExpressions.MatchGetValues(f.Elements)
-			isMatch = m
-			err = e
-			matches[nameSplit[0]][nameSplit[1]] = v
-		} else if f, ok := features.Instances[featureName]; ok {
-			v, e := term.MatchExpressions.MatchGetInstances(f.Elements)
-			isMatch = len(v) > 0
-			err = e
-			matches[nameSplit[0]][nameSplit[1]] = v
-		} else {
-			return false, nil, fmt.Errorf("feature %q not available", featureName)
-		}
-
-		if err != nil {
-			return false, nil, err
-		} else if !isMatch {
-			return false, nil, nil
-		}
-	}
-	return true, matches, nil
-}
-
-type templateHelper struct {
-	template *template.Template
-}
-
-func newTemplateHelper(name string) (*templateHelper, error) {
-	tmpl, err := template.New("").Option("missingkey=error").Parse(name)
-	if err != nil {
-		return nil, fmt.Errorf("invalid template: %w", err)
-	}
-	return &templateHelper{template: tmpl}, nil
-}
-
-// DeepCopy is a stub to augment the auto-generated code
-func (h *templateHelper) DeepCopy() *templateHelper {
-	if h == nil {
-		return nil
-	}
-	out := new(templateHelper)
-	h.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is a stub to augment the auto-generated code
-func (h *templateHelper) DeepCopyInto(out *templateHelper) {
-	// HACK: just re-use the template
-	out.template = h.template
-}
-
-func (h *templateHelper) execute(data interface{}) (string, error) {
-	var tmp bytes.Buffer
-	if err := h.template.Execute(&tmp, data); err != nil {
-		return "", err
-	}
-	return tmp.String(), nil
-}
-
-// expandMap is a helper for expanding a template into a map of strings. Data
-// after executing the template is expected to be key=value pairs separated by
-// newlines.
-func (h *templateHelper) expandMap(data interface{}) (map[string]string, error) {
-	expanded, err := h.execute(data)
-	if err != nil {
-		return nil, err
-	}
-
-	// Split out individual key-value pairs
-	out := make(map[string]string)
-	for _, item := range strings.Split(expanded, "\n") {
-		// Remove leading/trailing whitespace and skip empty lines
-		if trimmed := strings.TrimSpace(item); trimmed != "" {
-			split := strings.SplitN(trimmed, "=", 2)
-			if len(split) == 1 {
-				return nil, fmt.Errorf("missing value in expanded template line %q (format must be '<key>=<value>')", trimmed)
-			}
-			out[split[0]] = split[1]
-		}
-	}
-	return out, nil
-}
diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/types.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/types.go
index c611086ff..f5bfcb7ec 100644
--- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/types.go
+++ b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/types.go
@@ -146,6 +146,10 @@ type Rule struct {
 	// +optional
 	LabelsTemplate string `json:"labelsTemplate"`
 
+	// Annotations to create if the rule matches.
+	// +optional
+	Annotations map[string]string `json:"annotations"`
+
 	// Vars is the variables to store if the rule matches. Variables do not
 	// directly inflict any changes in the node object. However, they can be
 	// referenced from other rules enabling more complex rule hierarchies,
@@ -174,10 +178,6 @@ type Rule struct {
 	// MatchAny specifies a list of matchers one of which must match.
 	// +optional
 	MatchAny []MatchAnyElem `json:"matchAny"`
-
-	// private helpers/cache for handling golang templates
-	labelsTemplate *templateHelper `json:"-"`
-	varsTemplate   *templateHelper `json:"-"`
 }
 
 // MatchAnyElem specifies one sub-matcher of MatchAny.
@@ -194,25 +194,25 @@ type FeatureMatcher []FeatureMatcherTerm
 // requirements (specified as MatchExpressions) are evaluated against each
 // element in the feature set.
 type FeatureMatcherTerm struct {
-	Feature          string             `json:"feature"`
-	MatchExpressions MatchExpressionSet `json:"matchExpressions"`
+	// Feature is the name of the feature set to match against.
+	Feature string `json:"feature"`
+	// MatchExpressions is the set of per-element expressions evaluated. These
+	// match against the value of the specified elements.
+	// +optional
+	MatchExpressions *MatchExpressionSet `json:"matchExpressions"`
+	// MatchName is an expression that is matched against the name of each
+	// element in the feature set.
+	// +optional
+	MatchName *MatchExpression `json:"matchName"`
 }
 
 // MatchExpressionSet contains a set of MatchExpressions, each of which is
 // evaluated against a set of input values.
 type MatchExpressionSet map[string]*MatchExpression
 
-// Expressions is a helper type to work around issues with k8s deepcopy-gen
-
 // MatchExpression specifies an expression to evaluate against a set of input
 // values. It contains an operator that is applied when matching the input and
 // an array of values that the operator evaluates the input against.
-//
-// NB: CreateMatchExpression or MustCreateMatchExpression() should be used for
-// creating new instances.
-//
-// NB: Validate() must be called if Op or Value fields are modified or if a new
-// instance is created from scratch without using the helper functions.
 type MatchExpression struct {
 	// Op is the operator to be applied.
 	Op MatchOp `json:"op"`
@@ -224,9 +224,6 @@ type MatchExpression struct {
 	// In other cases Value should contain at least one element.
 	// +optional
 	Value MatchValue `json:"value,omitempty"`
-
-	// valueRe caches compiled regexps for "InRegexp" operator
-	valueRe valueRegexpCache `json:"-"`
 }
 
 // MatchOp is the match operator that is applied on values when evaluating a
@@ -286,3 +283,7 @@ const (
 	// output of preceding rules.
 	RuleBackrefFeature = "matched"
 )
+
+// MatchAllNames is a special key in MatchExpressionSet to use field names
+// (keys from the input) instead of values when matching.
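A hedged sketch of the reworked term (editorial; the feature name and regexp are hypothetical). MatchExpressions is now optional and a pointer, MatchName matches element names, and the MatchAllNames key declared just below is the in-set way to do the same:

exprs := nfdv1alpha1.MatchExpressionSet{
	// MatchAllNames ("*") applies the expression to element names
	// instead of their values.
	nfdv1alpha1.MatchAllNames: &nfdv1alpha1.MatchExpression{
		Op:    nfdv1alpha1.MatchInRegexp,
		Value: nfdv1alpha1.MatchValue{"^nvidia"},
	},
}
term := nfdv1alpha1.FeatureMatcherTerm{
	Feature:          "kernel.loadedmodule",
	MatchExpressions: &exprs,
	// MatchName is the per-term alternative: one expression matched
	// against the name of every element in the feature set.
	MatchName: &nfdv1alpha1.MatchExpression{
		Op:    nfdv1alpha1.MatchIn,
		Value: nfdv1alpha1.MatchValue{"nvidia"},
	},
}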
+const MatchAllNames = "*" diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/zz_generated.deepcopy.go index fac8c2ad1..6f9d9052e 100644 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/node-feature-discovery/pkg/apis/nfd/v1alpha1/zz_generated.deepcopy.go @@ -58,19 +58,28 @@ func (in *FeatureMatcherTerm) DeepCopyInto(out *FeatureMatcherTerm) { *out = *in if in.MatchExpressions != nil { in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make(MatchExpressionSet, len(*in)) - for key, val := range *in { - var outVal *MatchExpression - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(MatchExpression) - (*in).DeepCopyInto(*out) + *out = new(MatchExpressionSet) + if **in != nil { + in, out := *in, *out + *out = make(map[string]*MatchExpression, len(*in)) + for key, val := range *in { + var outVal *MatchExpression + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(MatchExpression) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal } - (*out)[key] = outVal } } + if in.MatchName != nil { + in, out := &in.MatchName, &out.MatchName + *out = new(MatchExpression) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureMatcherTerm. @@ -215,7 +224,6 @@ func (in *MatchExpression) DeepCopyInto(out *MatchExpression) { *out = make(MatchValue, len(*in)) copy(*out, *in) } - in.valueRe.DeepCopyInto(&out.valueRe) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchExpression. @@ -276,57 +284,6 @@ func (in MatchValue) DeepCopy() MatchValue { return *out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in MatchedInstance) DeepCopyInto(out *MatchedInstance) { - { - in := &in - *out = make(MatchedInstance, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchedInstance. -func (in MatchedInstance) DeepCopy() MatchedInstance { - if in == nil { - return nil - } - out := new(MatchedInstance) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MatchedKey) DeepCopyInto(out *MatchedKey) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchedKey. -func (in *MatchedKey) DeepCopy() *MatchedKey { - if in == nil { - return nil - } - out := new(MatchedKey) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MatchedValue) DeepCopyInto(out *MatchedValue) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchedValue. -func (in *MatchedValue) DeepCopy() *MatchedValue { - if in == nil { - return nil - } - out := new(MatchedValue) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
 func (in *Nil) DeepCopyInto(out *Nil) {
 	*out = *in
@@ -513,6 +470,13 @@ func (in *Rule) DeepCopyInto(out *Rule) {
 			(*out)[key] = val
 		}
 	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 	if in.Vars != nil {
 		in, out := &in.Vars, &out.Vars
 		*out = make(map[string]string, len(*in))
@@ -548,14 +512,6 @@ func (in *Rule) DeepCopyInto(out *Rule) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	if in.labelsTemplate != nil {
-		in, out := &in.labelsTemplate, &out.labelsTemplate
-		*out = (*in).DeepCopy()
-	}
-	if in.varsTemplate != nil {
-		in, out := &in.varsTemplate, &out.varsTemplate
-		*out = (*in).DeepCopy()
-	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/dump.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/dump.go
deleted file mode 100644
index 5b713ad61..000000000
--- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/dump.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package utils
-
-import (
-	"fmt"
-
-	"sigs.k8s.io/yaml"
-)
-
-type dumper struct {
-	obj interface{}
-}
-
-// String implements the fmt.Stringer interface
-func (d *dumper) String() string {
-	return Dump(d.obj)
-}
-
-// DelayedDumper delays the dumping of an object. Useful in logging to delay
-// the processing (JSON marshalling) until (or if) the object is actually
-// evaluated.
-func DelayedDumper(obj interface{}) fmt.Stringer {
-	return &dumper{obj: obj}
-}
-
-// Dump dumps an object into YAML textual format
-func Dump(obj interface{}) string {
-	out, err := yaml.Marshal(obj)
-	if err != nil {
-		return fmt.Sprintf("<!!! FAILED TO MARSHAL %v (%v) !!!>\n", obj, err)
-	}
-	return string(out)
-}
diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/flags.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/flags.go
deleted file mode 100644
index ab9379c05..000000000
--- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/flags.go
+++ /dev/null
@@ -1,212 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package utils - -import ( - "encoding/json" - "flag" - "fmt" - "regexp" - "sort" - "strings" - "time" -) - -// RegexpVal is a wrapper for regexp command line flags -type RegexpVal struct { - regexp.Regexp -} - -// Set implements the flag.Value interface -func (a *RegexpVal) Set(val string) error { - r, err := regexp.Compile(val) - if err == nil { - a.Regexp = *r - } - return err -} - -// UnmarshalJSON implements the Unmarshaler interface from "encoding/json" -func (a *RegexpVal) UnmarshalJSON(data []byte) error { - var v interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - switch val := v.(type) { - case string: - if r, err := regexp.Compile(string(val)); err != nil { - return err - } else { - *a = RegexpVal{*r} - } - default: - return fmt.Errorf("invalid regexp %s", data) - } - return nil -} - -// StringSetVal is a Value encapsulating a set of comma-separated strings -type StringSetVal map[string]struct{} - -// Set implements the flag.Value interface -func (a *StringSetVal) Set(val string) error { - m := map[string]struct{}{} - for _, n := range strings.Split(val, ",") { - m[n] = struct{}{} - } - *a = m - return nil -} - -// String implements the flag.Value interface -func (a *StringSetVal) String() string { - if *a == nil { - return "" - } - vals := make([]string, 0, len(*a)) - for val := range *a { - vals = append(vals, val) - } - sort.Strings(vals) - return strings.Join(vals, ",") -} - -// UnmarshalJSON implements the Unmarshaler interface from "encoding/json" -func (a *StringSetVal) UnmarshalJSON(data []byte) error { - var tmp []string - if err := json.Unmarshal(data, &tmp); err != nil { - return err - } - for _, v := range tmp { - (*a)[v] = struct{}{} - } - return nil -} - -// StringSliceVal is a Value encapsulating a slice of comma-separated strings -type StringSliceVal []string - -// Set implements the regexp.Value interface -func (a *StringSliceVal) Set(val string) error { - *a = strings.Split(val, ",") - return nil -} - -// String implements the regexp.Value interface -func (a *StringSliceVal) String() string { - if *a == nil { - return "" - } - return strings.Join(*a, ",") -} - -// KlogFlagVal is a wrapper to allow dynamic control of klog from the config file -type KlogFlagVal struct { - flag *flag.Flag - isSetFromCmdLine bool -} - -// Set implements flag.Value interface -func (k *KlogFlagVal) Set(value string) error { - k.isSetFromCmdLine = true - return k.flag.Value.Set(value) -} - -// String implements flag.Value interface -func (k *KlogFlagVal) String() string { - if k.flag == nil { - return "" - } - // Need to handle "log_backtrace_at" in a special way - s := k.flag.Value.String() - if k.flag.Name == "log_backtrace_at" && s == ":0" { - s = "" - } - return s -} - -// DefValue returns the default value of KlogFlagVal as string -func (k *KlogFlagVal) DefValue() string { - // Need to handle "log_backtrace_at" in a special way - d := k.flag.DefValue - if k.flag.Name == "log_backtrace_at" && d == ":0" { - d = "" - } - return d -} - -// SetFromConfig sets the value without marking it as set from the cmdline -func (k *KlogFlagVal) SetFromConfig(value string) error { - return k.flag.Value.Set(value) -} - -// IsSetFromCmdline returns true if the value has been set via Set() -func (k *KlogFlagVal) IsSetFromCmdline() bool { return k.isSetFromCmdLine } - -// IsBoolFlag implements flag.boolFlag.IsBoolFlag() for wrapped klog flags. 
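These wrappers all satisfy flag.Value, so they plug straight into the standard flag package; a short editorial sketch (the flag name is hypothetical, and the import is the v0.14 package deleted by this patch):

package main

import (
	"flag"
	"fmt"

	"sigs.k8s.io/node-feature-discovery/pkg/utils"
)

func main() {
	var sources utils.StringSliceVal
	flag.Var(&sources, "sources", "comma-separated list of enabled sources")
	flag.Parse()
	fmt.Println(sources) // -sources=cpu,kernel,pci prints [cpu kernel pci]
}

IsBoolFlag, below, completes the same flag-package integration for the klog wrapper.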
-func (k *KlogFlagVal) IsBoolFlag() bool { - if ba, ok := k.flag.Value.(boolFlag); ok { - return ba.IsBoolFlag() - } - return false -} - -// NewKlogFlagVal wraps a klog flag into KlogFlagVal type -func NewKlogFlagVal(f *flag.Flag) *KlogFlagVal { - return &KlogFlagVal{flag: f} -} - -// boolFlag replicates boolFlag interface internal to the flag package -type boolFlag interface { - IsBoolFlag() bool -} - -// DurationVal is a wrapper for handling time.Duration as a command line flag -type DurationVal struct { - time.Duration -} - -// UnmarshalJSON implements the Unmarshaler interface from "encoding/json" -func (d *DurationVal) UnmarshalJSON(data []byte) error { - var v interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - switch val := v.(type) { - case float64: - d.Duration = time.Duration(val) - case string: - var err error - d.Duration, err = time.ParseDuration(val) - if err != nil { - return err - } - default: - return fmt.Errorf("invalid duration %s", data) - } - return nil -} - -// Set implements the flag.Value interface -func (d *DurationVal) Set(val string) error { - m, err := time.ParseDuration(val) - if err != nil { - return err - } - *d = DurationVal{m} - return nil -} diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/fswatcher.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/fswatcher.go deleted file mode 100644 index 82bf58b06..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/fswatcher.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "fmt" - "path/filepath" - "time" - - "github.com/fsnotify/fsnotify" - "k8s.io/klog/v2" -) - -// FsWatcher is a wrapper helper for watching files -type FsWatcher struct { - *fsnotify.Watcher - - Events chan struct{} - ratelimit time.Duration - names []string - paths map[string]struct{} -} - -// CreateFsWatcher creates a new FsWatcher -func CreateFsWatcher(ratelimit time.Duration, names ...string) (*FsWatcher, error) { - w := &FsWatcher{ - Events: make(chan struct{}), - names: names, - ratelimit: ratelimit, - } - - if err := w.reset(names...); err != nil { - return nil, err - } - - go w.watch() - - return w, nil -} - -// reset resets the file watches -func (w *FsWatcher) reset(names ...string) error { - if err := w.initWatcher(); err != nil { - return err - } - if err := w.add(names...); err != nil { - return err - } - - return nil -} - -func (w *FsWatcher) initWatcher() error { - if w.Watcher != nil { - if err := w.Watcher.Close(); err != nil { - return fmt.Errorf("failed to close fsnotify watcher: %v", err) - } - } - w.paths = make(map[string]struct{}) - - watcher, err := fsnotify.NewWatcher() - if err != nil { - w.Watcher = nil - return fmt.Errorf("failed to create fsnotify watcher: %v", err) - } - w.Watcher = watcher - - return nil -} - -func (w *FsWatcher) add(names ...string) error { - for _, name := range names { - if name == "" { - continue - } - - added := false - // Add watches for all directory components so that we catch e.g. renames - // upper in the tree - for p := name; ; p = filepath.Dir(p) { - if _, ok := w.paths[p]; !ok { - if err := w.Add(p); err != nil { - klog.V(1).ErrorS(err, "failed to add fsnotify watch", "path", p) - } else { - klog.V(1).InfoS("added fsnotify watch", "path", p) - added = true - } - - w.paths[p] = struct{}{} - } else { - added = true - } - if filepath.Dir(p) == p { - break - } - } - if !added { - // Want to be sure that we watch something - return fmt.Errorf("failed to add any watch") - } - } - - return nil -} - -func (w *FsWatcher) watch() { - var ratelimiter <-chan time.Time - for { - select { - case e, ok := <-w.Watcher.Events: - // Watcher has been closed - if !ok { - klog.InfoS("watcher closed") - return - } - - // If any of our paths change - name := filepath.Clean(e.Name) - if _, ok := w.paths[filepath.Clean(name)]; ok { - klog.V(2).InfoS("fsnotify event detected", "path", name, "fsNotifyEvent", e) - - // Rate limiter. In certain filesystem operations we get - // numerous events in quick succession - ratelimiter = time.After(w.ratelimit) - } - - case e, ok := <-w.Watcher.Errors: - // Watcher has been closed - if !ok { - klog.InfoS("watcher closed") - return - } - klog.ErrorS(e, "fswatcher error event detected") - - case <-ratelimiter: - // Blindly remove existing watch and add a new one - if err := w.reset(w.names...); err != nil { - klog.ErrorS(err, "re-trying in 60 seconds") - ratelimiter = time.After(60 * time.Second) - } - - w.Events <- struct{}{} - } - } -} diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/grpc_log.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/grpc_log.go deleted file mode 100644 index f8c8bf0ae..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/grpc_log.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - - "google.golang.org/grpc/grpclog" - "k8s.io/klog/v2" -) - -// ConfigureGrpcKlog wraps grpc logging to use klog -func ConfigureGrpcKlog() { - grpclog.SetLoggerV2(new(grpcLogger)) -} - -// grpcLogger implements the LoggerV2 interface from grpclog -type grpcLogger struct{} - -func (g grpcLogger) Error(args ...interface{}) { - klog.ErrorDepth(2, args...) -} - -func (g grpcLogger) Errorf(format string, args ...interface{}) { - klog.ErrorDepth(2, fmt.Sprintf(format, args...)) -} - -func (g grpcLogger) Errorln(args ...interface{}) { - klog.ErrorDepth(2, args...) -} - -func (g grpcLogger) Fatal(args ...interface{}) { - klog.FatalDepth(2, args...) -} - -func (g grpcLogger) Fatalf(format string, args ...interface{}) { - klog.FatalDepth(2, fmt.Sprintf(format, args...)) -} - -func (g grpcLogger) Fatalln(args ...interface{}) { - klog.FatalDepth(2, args...) -} - -func (g grpcLogger) Info(args ...interface{}) { - klog.InfoDepth(2, args...) -} - -func (g grpcLogger) Infof(format string, args ...interface{}) { - klog.InfoDepth(2, fmt.Sprintf(format, args...)) -} - -func (g grpcLogger) Infoln(args ...interface{}) { - klog.InfoDepth(2, args...) -} - -func (g grpcLogger) Warning(args ...interface{}) { - klog.WarningDepth(2, args...) -} - -func (g grpcLogger) Warningf(format string, args ...interface{}) { - klog.WarningDepth(2, fmt.Sprintf(format, args...)) -} - -func (g grpcLogger) Warningln(args ...interface{}) { - klog.WarningDepth(2, args...) -} - -func (g grpcLogger) V(l int) bool { - return klog.V(klog.Level(l)).Enabled() -} diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath/hostpath.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath/hostpath.go deleted file mode 100644 index 557e33590..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath/hostpath.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package hostpath - -import ( - "path/filepath" -) - -var ( - pathPrefix = "/" - // BootDir is where the /boot directory of the system to be inspected is located - BootDir = HostDir(pathPrefix + "boot") - // EtcDir is where the /etc directory of the system to be inspected is located - EtcDir = HostDir(pathPrefix + "etc") - // SysfsDir is where the /sys directory of the system to be inspected is located - SysfsDir = HostDir(pathPrefix + "sys") - // UsrDir is where the /usr directory of the system to be inspected is located - UsrDir = HostDir(pathPrefix + "usr") - // VarDir is where the /var directory of the system to be inspected is located - VarDir = HostDir(pathPrefix + "var") - // LibDir is where the /lib directory of the system to be inspected is located - LibDir = HostDir(pathPrefix + "lib") -) - -// HostDir is a helper for handling host system directories -type HostDir string - -// Path returns a full path to a file under HostDir -func (d HostDir) Path(elem ...string) string { - return filepath.Join(append([]string{string(d)}, elem...)...) -} diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/kubernetes.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/kubernetes.go deleted file mode 100644 index 7601da0fc..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/kubernetes.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "os" - "strings" -) - -var nodeName string - -// NodeName returns the name of the k8s node we're running on. -func NodeName() string { - if nodeName == "" { - nodeName = os.Getenv("NODE_NAME") - } - return nodeName -} - -// GetKubernetesNamespace returns the kubernetes namespace we're running under, -// or an empty string if the namespace cannot be determined. -func GetKubernetesNamespace() string { - const kubernetesNamespaceFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" - if _, err := os.Stat(kubernetesNamespaceFilePath); err == nil { - data, err := os.ReadFile(kubernetesNamespaceFilePath) - if err == nil { - return strings.TrimSpace(string(data)) - } - } - return os.Getenv("KUBERNETES_NAMESPACE") -} diff --git a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/memory_resources.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/memory_resources.go deleted file mode 100644 index ab6447a75..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/memory_resources.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/klog/v2" - resourcehelper "k8s.io/kubernetes/pkg/apis/core/helper" - - "sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath" -) - -var ( - sysBusNodeBasepath = hostpath.SysfsDir.Path("bus/node/devices") -) - -// NumaMemoryResources contains information of the memory resources per NUMA -// nodes of the system. -type NumaMemoryResources map[int]MemoryResourceInfo - -// MemoryResourceInfo holds information of memory resources per resource type. -type MemoryResourceInfo map[corev1.ResourceName]int64 - -// GetNumaMemoryResources returns total amount of memory and hugepages under NUMA nodes -func GetNumaMemoryResources() (NumaMemoryResources, error) { - nodes, err := os.ReadDir(sysBusNodeBasepath) - if err != nil { - return nil, err - } - - memoryResources := make(NumaMemoryResources, len(nodes)) - for _, n := range nodes { - numaNode := n.Name() - nodeID, err := strconv.Atoi(numaNode[4:]) - if err != nil { - return nil, fmt.Errorf("failed to parse NUMA node ID of %q", numaNode) - } - - info := make(MemoryResourceInfo) - - // Get total memory - nodeTotalMemory, err := readTotalMemoryFromMeminfo(filepath.Join(sysBusNodeBasepath, numaNode, "meminfo")) - if err != nil { - return nil, err - } - info[corev1.ResourceMemory] = nodeTotalMemory - - // Get hugepages - hugepageBytes, err := getHugepagesBytes(filepath.Join(sysBusNodeBasepath, numaNode, "hugepages")) - if err != nil { - if os.IsNotExist(err) { - continue - } else { - return nil, err - } - } - for n, s := range hugepageBytes { - info[n] = s - } - - memoryResources[nodeID] = info - } - - return memoryResources, nil -} - -func getHugepagesBytes(path string) (MemoryResourceInfo, error) { - entries, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - hugepagesBytes := make(MemoryResourceInfo) - for _, entry := range entries { - split := strings.SplitN(entry.Name(), "-", 2) - if len(split) != 2 || split[0] != "hugepages" { - klog.InfoS("malformed hugepages entry", "hugepagesEntry", entry.Name()) - continue - } - - // Use Ki instead of kB - q, err := resource.ParseQuantity(strings.Replace(split[1], "kB", "Ki", 1)) - if err != nil { - return nil, err - } - - data, err := os.ReadFile(filepath.Join(path, entry.Name(), "nr_hugepages")) - if err != nil { - return nil, err - } - - nr, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - return nil, err - } - - size, _ := q.AsInt64() - name := corev1.ResourceName(resourcehelper.HugePageResourceName(q)) - hugepagesBytes[name] = nr * size - } - - return hugepagesBytes, nil -} - -func readTotalMemoryFromMeminfo(path string) (int64, error) { - data, err := os.ReadFile(path) - if err != nil { - return -1, err - } - - for _, line := range strings.Split(string(data), "\n") { - split := strings.SplitN(line, ":", 2) - if len(split) != 2 { - continue - } - - if strings.Contains(split[0], "MemTotal") { - memValue := strings.Trim(split[1], "\t\n kB") - convertedValue, err := strconv.ParseInt(memValue, 10, 64) - if err != nil { - return -1, fmt.Errorf("failed to convert value: %v", memValue) - } - - // return information in bytes - return 1024 * convertedValue, nil - } - } - - return -1, fmt.Errorf("failed to find MemTotal field under the file %q", path) -} diff --git 
a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/tls.go b/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/tls.go deleted file mode 100644 index 3e2e6efe7..000000000 --- a/vendor/sigs.k8s.io/node-feature-discovery/pkg/utils/tls.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package utils - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "os" - "sync" -) - -// TlsConfig is a TLS config wrapper/helper for cert rotation -type TlsConfig struct { - sync.Mutex - config *tls.Config -} - -// GetConfig returns the current TLS configuration. Intended to be used as the -// GetConfigForClient callback in tls.Config. -func (c *TlsConfig) GetConfig(*tls.ClientHelloInfo) (*tls.Config, error) { - c.Lock() - defer c.Unlock() - - return c.config, nil -} - -// UpdateConfig updates the wrapped TLS config -func (c *TlsConfig) UpdateConfig(certFile, keyFile, caFile string) error { - c.Lock() - defer c.Unlock() - - // Load cert for authenticating this server - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return fmt.Errorf("failed to load server certificate: %v", err) - } - // Load CA cert for client cert verification - caCert, err := os.ReadFile(caFile) - if err != nil { - return fmt.Errorf("failed to read root certificate file: %v", err) - } - caPool := x509.NewCertPool() - if ok := caPool.AppendCertsFromPEM(caCert); !ok { - return fmt.Errorf("failed to add certificate from '%s'", caFile) - } - - // Create TLS config - c.config = &tls.Config{ - Certificates: []tls.Certificate{cert}, - ClientCAs: caPool, - ClientAuth: tls.RequireAndVerifyClientCert, - GetConfigForClient: c.GetConfig, - MinVersion: tls.VersionTLS13, - } - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581..41fc2474a 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. 
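A small editorial sketch of the replace-on-duplicate behavior introduced here, using the fieldpath and value packages of structured-merge-diff (the field name is hypothetical):

func demoReplace() {
	m := fieldpath.MakePathElementValueMap(1)
	name := "replicas"
	pe := fieldpath.PathElement{FieldName: &name}

	m.Insert(pe, value.NewValueInterface(1))
	m.Insert(pe, value.NewValueInterface(2)) // now replaces rather than keeping the old value

	if v, ok := m.Get(pe); ok {
		fmt.Println(v.Unstructured()) // 2
	}
}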
 func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
+	s.valueMap.Insert(pe, v)
+}
+
+// Get retrieves the value associated with the given PathElement from the map.
+// (nil, false) is returned if there is no such PathElement.
+func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) {
+	v, ok := s.valueMap.Get(pe)
+	if !ok {
+		return nil, false
+	}
+	return v.(value.Value), true
+}
+
+// PathElementMap is a map from PathElement to interface{}.
+type PathElementMap struct {
+	members sortedPathElementValues
+}
+
+type pathElementValue struct {
+	PathElement PathElement
+	Value       interface{}
+}
+
+func MakePathElementMap(size int) PathElementMap {
+	return PathElementMap{
+		members: make(sortedPathElementValues, 0, size),
+	}
+}
+
+// Insert adds the pathelement and associated value in the map.
+// If insert is called twice with the same PathElement, the value is replaced.
+func (s *PathElementMap) Insert(pe PathElement, v interface{}) {
 	loc := sort.Search(len(s.members), func(i int) bool {
 		return !s.members[i].PathElement.Less(pe)
 	})
@@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
 		return
 	}
 	if s.members[loc].PathElement.Equals(pe) {
+		s.members[loc].Value = v
 		return
 	}
 	s.members = append(s.members, pathElementValue{})
@@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) {
 
 // Get retrieves the value associated with the given PathElement from the map.
 // (nil, false) is returned if there is no such PathElement.
-func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) {
+func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) {
 	loc := sort.Search(len(s.members), func(i int) bool {
 		return !s.members[i].PathElement.Less(pe)
 	})
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
index 75a492d8e..f1aa25860 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/conflict.go
@@ -112,7 +112,7 @@ func ConflictsFromManagers(sets fieldpath.ManagedFields) Conflicts {
 		set.Set().Iterate(func(p fieldpath.Path) {
 			conflicts = append(conflicts, Conflict{
 				Manager: manager,
-				Path:    p,
+				Path:    p.Copy(),
 			})
 		})
 	}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
index 1b23dcbd5..d5a977d60 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go
@@ -18,6 +18,7 @@ import (
 
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 	"sigs.k8s.io/structured-merge-diff/v4/typed"
+	"sigs.k8s.io/structured-merge-diff/v4/value"
 )
 
 // Converter is an interface to the conversion logic. The converter
@@ -27,19 +28,39 @@ type Converter interface {
 	IsMissingVersionError(error) bool
 }
 
-// Updater is the object used to compute updated FieldSets and also
-// merge the object on Apply.
-type Updater struct {
+// UpdaterBuilder allows you to create a new Updater by exposing all of
+// the options and setting them once.
+type UpdaterBuilder struct {
 	Converter     Converter
 	IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set
 
-	enableUnions bool
+	// Stop comparing the new object with the old object after applying.
+	// This was initially used to avoid spurious etcd updates, but since
+	// that was vastly inefficient, we've come up with a better way of
+	// doing that; this flag was created to skip the comparison.
+ // Comparing has become more expensive too now that we're not using + // `Compare` but `value.Equals` so this gives an option to avoid it. + ReturnInputOnNoop bool } -// EnableUnionFeature turns on union handling. It is disabled by default until the -// feature is complete. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true +func (u *UpdaterBuilder) BuildUpdater() *Updater { + return &Updater{ + Converter: u.Converter, + IgnoredFields: u.IgnoredFields, + returnInputOnNoop: u.ReturnInputOnNoop, + } +} + +// Updater is the object used to compute updated FieldSets and also +// merge the object on Apply. +type Updater struct { + // Deprecated: This will eventually become private. + Converter Converter + + // Deprecated: This will eventually become private. + IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + + returnInputOnNoop bool } func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { @@ -126,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -145,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -157,30 +172,17 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp // Apply should be called when Apply is run, given the current object as // well as the configuration that is applied. This will merge the object -// and return it. If the object hasn't changed, nil is returned (the -// managers can still have changed though). +// and return it. 
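Editor's note: construction moves from setting Updater's fields directly (now deprecated) to UpdaterBuilder. A sketch of the new call site, assuming conv is any merge.Converter implementation:

```go
package example

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/merge"
)

func newUpdater(conv merge.Converter, ignored map[fieldpath.APIVersion]*fieldpath.Set) *merge.Updater {
	// All options are set once on the builder; BuildUpdater copies them
	// into the Updater, including the otherwise-private no-op flag.
	return (&merge.UpdaterBuilder{
		Converter:     conv,
		IgnoredFields: ignored,
		// true skips the final live-vs-merged equality check, so Apply
		// returns the merged object even when nothing changed.
		ReturnInputOnNoop: true,
	}).BuildUpdater()
}
```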
func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, manager string, force bool) (*typed.TypedValue, fieldpath.ManagedFields, error) { var err error managers, err = s.reconcileManagedFieldsWithSchemaChanges(liveObject, managers) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { @@ -200,11 +202,11 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to prune fields: %v", err) } - managers, compare, err := s.update(liveObject, newObject, version, managers, manager, force) + managers, _, err = s.update(liveObject, newObject, version, managers, manager, force) if err != nil { return nil, fieldpath.ManagedFields{}, err } - if compare.IsSame() { + if !s.returnInputOnNoop && value.EqualsUsing(value.NewFreelistAllocator(), liveObject.AsValue(), newObject.AsValue()) { newObject = nil } return newObject, managers, nil @@ -218,7 +220,8 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel if lastSet == nil || lastSet.Set().Empty() { return merged, nil } - convertedMerged, err := s.Converter.Convert(merged, lastSet.APIVersion()) + version := lastSet.APIVersion() + convertedMerged, err := s.Converter.Convert(merged, version) if err != nil { if s.Converter.IsMissingVersionError(err) { return merged, nil @@ -228,7 +231,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel sc, tr := convertedMerged.Schema(), convertedMerged.TypeRef() pruned := convertedMerged.RemoveItems(lastSet.Set().EnsureNamedFieldsAreMembers(sc, tr)) - pruned, err = s.addBackOwnedItems(convertedMerged, pruned, managers, applyingManager) + pruned, err = s.addBackOwnedItems(convertedMerged, pruned, version, managers, applyingManager) if err != nil { return nil, fmt.Errorf("failed add back owned items: %v", err) } @@ -241,7 +244,7 @@ func (s *Updater) prune(merged *typed.TypedValue, managers fieldpath.ManagedFiel // addBackOwnedItems adds back any fields, list and map items that were removed by prune, // but other appliers or updaters (or the current applier's new config) claim to own. -func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { +func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, prunedVersion fieldpath.APIVersion, managedFields fieldpath.ManagedFields, applyingManager string) (*typed.TypedValue, error) { var err error managedAtVersion := map[fieldpath.APIVersion]*fieldpath.Set{} for _, managerSet := range managedFields { @@ -252,7 +255,6 @@ func (s *Updater) addBackOwnedItems(merged, pruned *typed.TypedValue, managedFie } // Add back owned items at pruned version first to avoid conversion failure // caused by pruned fields which are required for conversion. 
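Editor's note: per the Apply hunk above, the "no change" signal is now computed with value.EqualsUsing over a freelist allocator rather than with the Comparison from update, but the caller-visible contract is unchanged unless ReturnInputOnNoop is set: a nil returned object still means "nothing to write". A hedged sketch of caller handling; storeObject and storeManagedFields are hypothetical helpers:

```go
newObj, managers, err := updater.Apply(live, config, version, managers, "my-manager", false)
if err != nil {
	return err
}
if newObj == nil {
	// No semantic change to the object; managed fields may still have
	// moved, so persist those but skip the object write.
	return storeManagedFields(managers)
}
return storeObject(newObj, managers)
```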
- prunedVersion := fieldpath.APIVersion(*pruned.TypeRef().NamedType) if managed, ok := managedAtVersion[prunedVersion]; ok { merged, pruned, err = s.addBackOwnedItemsForVersion(merged, pruned, prunedVersion, managed) if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 7e5dc7582..5d3707a5b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -73,7 +73,7 @@ type Atom struct { } // Scalar (AKA "primitive") represents a type which has a single value which is -// either numeric, string, or boolean. +// either numeric, string, or boolean, or untyped for any of them. // // TODO: split numeric into float/int? Something even more fine-grained? type Scalar string @@ -82,6 +82,7 @@ const ( Numeric = Scalar("numeric") String = Scalar("string") Boolean = Scalar("boolean") + Untyped = Scalar("untyped") ) // ElementRelationship is an enum of the different possible relationships diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index 7d64d1308..6eb6c36df 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -110,7 +110,7 @@ var SchemaSchemaYAML = `types: scalar: string - name: deduceInvalidDiscriminator type: - scalar: bool + scalar: boolean - name: fields type: list: @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 000000000..ed483cbbc --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). 
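Editor's note: compare.go is a new file; the Comparison type itself moves here from typed.go (its old copy is removed further down in this patch). A usage sketch, assuming lhs and rhs are *typed.TypedValue built from the same schema and type:

```go
cmp, err := lhs.Compare(rhs)
if err != nil {
	return err // schema/type mismatch or validation failure
}
if cmp.IsSame() {
	fmt.Println("no differences")
} else {
	fmt.Print(cmp.String()) // lists Modified/Added/Removed field sets
}
```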
+func (c *Comparison) IsSame() bool {
+	return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty()
+}
+
+// String returns a human-readable version of the comparison.
+func (c *Comparison) String() string {
+	bld := strings.Builder{}
+	if !c.Modified.Empty() {
+		bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified))
+	}
+	if !c.Added.Empty() {
+		bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added))
+	}
+	if !c.Removed.Empty() {
+		bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed))
+	}
+	return bld.String()
+}
+
+// ExcludeFields recursively removes the given fields
+// from the entire comparison.
+func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison {
+	if fields == nil || fields.Empty() {
+		return c
+	}
+	c.Removed = c.Removed.RecursiveDifference(fields)
+	c.Modified = c.Modified.RecursiveDifference(fields)
+	c.Added = c.Added.RecursiveDifference(fields)
+	return c
+}
+
+type compareWalker struct {
+	lhs     value.Value
+	rhs     value.Value
+	schema  *schema.Schema
+	typeRef schema.TypeRef
+
+	// Current path that we are comparing
+	path fieldpath.Path
+
+	// Resulting comparison.
+	comparison *Comparison
+
+	// internal housekeeping--don't set when constructing.
+	inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list
+
+	// Allocate only as many walkers as needed for the depth by storing them here.
+	spareWalkers *[]*compareWalker
+
+	allocator value.Allocator
+}
+
+// compare walks both sides and records their differences in w.comparison.
+func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) {
+	if w.lhs == nil && w.rhs == nil {
+		// check this condition here instead of everywhere below.
+		return errorf("at least one of lhs and rhs must be provided")
+	}
+	a, ok := w.schema.Resolve(w.typeRef)
+	if !ok {
+		return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType)
+	}
+
+	alhs := deduceAtom(a, w.lhs)
+	arhs := deduceAtom(a, w.rhs)
+
+	// deduceAtom does not fix the type for nil values
+	// nil is a wildcard and will accept whatever form the other operand takes
+	if w.rhs == nil {
+		errs = append(errs, handleAtom(alhs, w.typeRef, w)...)
+	} else if w.lhs == nil || alhs.Equals(&arhs) {
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	} else {
+		w2 := *w
+		errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...)
+		errs = append(errs, handleAtom(arhs, w.typeRef, w)...)
+	}
+
+	if !w.inLeaf {
+		if w.lhs == nil {
+			w.comparison.Added.Insert(w.path)
+		} else if w.rhs == nil {
+			w.comparison.Removed.Insert(w.path)
+		}
+	}
+	return errs.WithLazyPrefix(prefixFn)
+}
+
+// doLeaf should be called on leaves before descending into children, if there
+// will be a descent. It modifies w.inLeaf.
+func (w *compareWalker) doLeaf() {
+	if w.inLeaf {
+		// We're in a "big leaf", an atomic map or list. Ignore
+		// subsequent leaves.
+		return
+	}
+	w.inLeaf = true
+
+	// We don't recurse into leaf fields for merging.
+	if w.lhs == nil {
+		w.comparison.Added.Insert(w.path)
+	} else if w.rhs == nil {
+		w.comparison.Removed.Insert(w.path)
+	} else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) {
+		// TODO: Equality is not sufficient for this.
+		// Need to implement equality check on the value type.
+		w.comparison.Modified.Insert(w.path)
+	}
+}
+
+func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors {
+	// Make sure at least one side is a valid scalar.
+ lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. + w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. 
+			return
+		// Normal use-case:
+		// We have no duplicates for this PE, compare items one-to-one.
+		case len(lList) <= 1 && len(rList) <= 1:
+			lValue := value.Value(nil)
+			if len(lList) != 0 {
+				lValue = lList[0]
+			}
+			rValue := value.Value(nil)
+			if len(rList) != 0 {
+				rValue = rList[0]
+			}
+			errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...)
+		// Duplicates before & after use-case:
+		// Compare the duplicates lists as if they were atomic, mark modified if they changed.
+		case len(lList) >= 2 && len(rList) >= 2:
+			listEqual := func(lList, rList []value.Value) bool {
+				if len(lList) != len(rList) {
+					return false
+				}
+				for i := range lList {
+					if !value.Equals(lList[i], rList[i]) {
+						return false
+					}
+				}
+				return true
+			}
+			if !listEqual(lList, rList) {
+				w.comparison.Modified.Insert(append(w.path, pe))
+			}
+		// Duplicates before & not anymore use-case:
+		// Recursively add new non-duplicate items, remove the duplicate marker.
+		case len(lList) >= 2:
+			if len(rList) != 0 {
+				errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...)
+			}
+			w.comparison.Removed.Insert(append(w.path, pe))
+		// New duplicates use-case:
+		// Recursively remove old non-duplicate items, add duplicate marker.
+		case len(rList) >= 2:
+			if len(lList) != 0 {
+				errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...)
+			}
+			w.comparison.Added.Insert(append(w.path, pe))
+		}
+	}
+
+	return
+}
+
+func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+	var errs ValidationErrors
+	length := 0
+	if list != nil {
+		length = list.Length()
+	}
+	observed := fieldpath.MakePathElementValueMap(length)
+	pes := make([]fieldpath.PathElement, 0, length)
+	for i := 0; i < length; i++ {
+		child := list.At(i)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
+		if err != nil {
+			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
+			// If we can't construct the path element, we can't
+			// even report errors deeper in the schema, so bail on
+			// this element.
+			continue
+		}
+		// Ignore repeated occurrences of `pe`.
+		if _, found := observed.Get(pe); found {
+			continue
+		}
+		observed.Insert(pe, child)
+		pes = append(pes, pe)
+	}
+	return pes, observed, errs
+}
+
+func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors {
+	w2 := w.prepareDescent(pe, t.ElementType, w.comparison)
+	w2.lhs = lChild
+	w2.rhs = rChild
+	errs := w2.compare(pe.String)
+	w.finishDescent(w2)
+	return errs
+}
+
+func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
+	if v == nil {
+		return nil, nil
+	}
+	l, err := listValue(w.allocator, v)
+	if err != nil {
+		return nil, errorf("%v: %v", prefix, err)
+	}
+	return l, nil
+}
+
+func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) {
+	lhs, _ := w.derefList("lhs: ", w.lhs)
+	if lhs != nil {
+		defer w.allocator.Free(lhs)
+	}
+	rhs, _ := w.derefList("rhs: ", w.rhs)
+	if rhs != nil {
+		defer w.allocator.Free(rhs)
+	}
+
+	// If both lhs and rhs are empty/null, treat it as a
+	// leaf: this helps preserve the empty/null
+	// distinction.
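Editor's note: visitListItems above relies on the new fieldpath.PathElementMap (first hunk of this patch's structured-merge-diff changes) to bucket list items by key while keeping duplicates together. The idiom in isolation; the peOf parameter is an illustrative stand-in for listItemToPathElement:

```go
package example

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
	"sigs.k8s.io/structured-merge-diff/v4/value"
)

// bucket groups items by path element; duplicated keys accumulate into
// the same slice, mirroring how compareWalker gathers lhs and rhs.
func bucket(items []value.Value, peOf func(value.Value) fieldpath.PathElement) fieldpath.PathElementMap {
	m := fieldpath.MakePathElementMap(len(items))
	for _, item := range items {
		pe := peOf(item)
		if prev, found := m.Get(pe); found {
			// Insert replaces the stored value for an existing key.
			m.Insert(pe, append(prev.([]value.Value), item))
		} else {
			m.Insert(pe, []value.Value{item})
		}
	}
	return m
}
```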
+ emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f..78fdb0e75 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
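Editor's note: doMap/visitMapItems above walk both maps in a single pass with value.MapZipUsing, which invokes the callback once per key present on either side and passes nil for the missing side. A sketch on plain unstructured values:

```go
alloc := value.NewFreelistAllocator()
lhs := value.NewValueInterface(map[string]interface{}{"a": 1, "b": 2}).AsMap()
rhs := value.NewValueInterface(map[string]interface{}{"b": 3, "c": 4}).AsMap()

value.MapZipUsing(alloc, lhs, rhs, value.Unordered, func(key string, l, r value.Value) bool {
	switch {
	case l == nil:
		fmt.Printf("%s: added\n", key) // only in rhs
	case r == nil:
		fmt.Printf("%s: removed\n", key) // only in lhs
	case !value.EqualsUsing(alloc, l, r):
		fmt.Printf("%s: modified\n", key) // in both, different
	}
	return true // keep iterating
})
```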
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. + return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 913644083..fa227ac40 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -113,11 +113,12 @@ func (w *mergingWalker) doLeaf() { w.rule(w) } -func (w *mergingWalker) doScalar(t *schema.Scalar) (errs ValidationErrors) { - errs = append(errs, validateScalar(t, w.lhs, "lhs: ")...) - errs = append(errs, validateScalar(t, w.rhs, "rhs: ")...) - if len(errs) > 0 { - return errs +func (w *mergingWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) } // All scalars are leaf fields. @@ -179,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) 
+	if len(errs) != 0 {
+		return errs
+	}
+
 	sharedOrder := make([]*fieldpath.PathElement, 0, rLen)
-	for i := range rhsOrder {
-		pe := &rhsOrder[i]
+	for i := range rhsPEs {
+		pe := &rhsPEs[i]
 		if _, ok := observedLHS.Get(*pe); ok {
 			sharedOrder = append(sharedOrder, pe)
 		}
@@ -198,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 		sharedOrder = sharedOrder[1:]
 	}
 
-	lLen, rLen = len(lhsOrder), len(rhsOrder)
+	mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs))
+	lLen, rLen = len(lhsPEs), len(rhsPEs)
 	for lI, rI := 0, 0; lI < lLen || rI < rLen; {
 		if lI < lLen && rI < rLen {
-			pe := lhsOrder[lI]
-			if pe.Equals(rhsOrder[rI]) {
+			pe := lhsPEs[lI]
+			if pe.Equals(rhsPEs[rI]) {
 				// merge LHS & RHS items
-				lChild, _ := observedLHS.Get(pe)
+				mergedRHS.Insert(pe, struct{}{})
+				lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicated.
 				rChild, _ := observedRHS.Get(pe)
 				mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
 				errs = append(errs, errs...)
@@ -221,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 				}
 				continue
 			}
-			if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) {
+			if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) {
 				// shared item, but not the one we want in this round
 				lI++
 				continue
 			}
 		}
 		if lI < lLen {
-			pe := lhsOrder[lI]
+			pe := lhsPEs[lI]
 			if _, ok := observedRHS.Get(pe); !ok {
-				// take LHS item
-				lChild, _ := observedLHS.Get(pe)
+				// take LHS item using At to make sure we get the right item (observed may not contain the right item).
+				lChild := lhs.AtUsing(w.allocator, lI)
 				mergeOut, errs := w.mergeListItem(t, pe, lChild, nil)
 				errs = append(errs, errs...)
 				if mergeOut != nil {
@@ -239,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 				}
 				lI++
 				continue
+			} else if _, ok := mergedRHS.Get(pe); ok {
+				// we've already merged it with RHS; skip it so it isn't duplicated.
+				lI++
 			}
 		}
 		if rI < rLen {
 			// Take the RHS item, merge with matching LHS item if possible
-			pe := rhsOrder[rI]
-			lChild, _ := observedLHS.Get(pe) // may be nil
+			pe := rhsPEs[rI]
+			mergedRHS.Insert(pe, struct{}{})
+			lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicated.
 			rChild, _ := observedRHS.Get(pe)
 			mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild)
 			errs = append(errs, errs...)
@@ -271,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
 	return errs
 }
 
-func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
+func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) {
 	var errs ValidationErrors
 	length := 0
 	if list != nil {
@@ -281,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) (
 	pes := make([]fieldpath.PathElement, 0, length)
 	for i := 0; i < length; i++ {
 		child := list.At(i)
-		pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child)
+		pe, err := listItemToPathElement(w.allocator, w.schema, t, child)
 		if err != nil {
 			errs = append(errs, errorf("element %v: %v", i, err.Error())...)
 			// If we can't construct the path element, we can't
@@ -289,11 +300,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (
 			// this element.
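Editor's note: the net effect of these merge changes is to give duplicated keys defined semantics: duplicated LHS entries are indexed as null (see the indexListPathElements hunk continuing below), so RHS content wins wholesale, and mergedRHS keeps the LHS pass from emitting an item twice. A hedged sketch of the observable behavior, assuming parser was built from a schema in which "list" is an associative list keyed on "name":

```go
lhs, _ := parser.Type("myType").FromYAML(`
list:
- name: a
  value: 1
- name: a
  value: 2
`, typed.AllowDuplicates)
rhs, _ := parser.Type("myType").FromYAML(`
list:
- name: a
  value: 3
`)
merged, err := lhs.Merge(rhs)
if err != nil {
	panic(err)
}
// Under these assumptions the merged list should hold a single "a"
// item taken from rhs: duplicated lhs entries are not merged
// field-by-field with the new value.
fmt.Println(merged.AsValue().Unstructured())
```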
continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. + observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc..4258ee5ba 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) 
 }
 
 // DeducedParseableType is a ParseableType that deduces the type from
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go
index a338d761d..ad071ee8f 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go
@@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
 	iter := l.RangeUsing(w.allocator)
 	defer w.allocator.Free(iter)
 	for iter.Next() {
-		i, item := iter.Item()
+		_, item := iter.Item()
 		// Ignore error because we have already validated this list
-		pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item)
+		pe, _ := listItemToPathElement(w.allocator, w.schema, t, item)
 		path, _ := fieldpath.MakePath(pe)
 		// save items on the path when we shouldExtract
 		// but ignore them when we are removing (i.e. !w.shouldExtract)
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go
index 047efff05..d563a87ee 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go
@@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors {
 }
 
 func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) {
+	// Keeps track of the PEs we've seen
+	seen := fieldpath.MakePathElementSet(list.Length())
+	// Keeps track of the PEs we've counted as duplicates
+	duplicates := fieldpath.MakePathElementSet(list.Length())
 	for i := 0; i < list.Length(); i++ {
 		child := list.At(i)
-		pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child)
+		pe, _ := listItemToPathElement(v.allocator, v.schema, t, child)
+		if seen.Has(pe) {
+			if duplicates.Has(pe) {
+				// do nothing
+			} else {
+				v.set.Insert(append(v.path, pe))
+				duplicates.Insert(pe)
+			}
+		} else {
+			seen.Insert(pe)
+		}
+	}
+
+	for i := 0; i < list.Length(); i++ {
+		child := list.At(i)
+		pe, _ := listItemToPathElement(v.allocator, v.schema, t, child)
+		if duplicates.Has(pe) {
+			continue
+		}
 		v2 := v.prepareDescent(pe, t.ElementType)
 		v2.value = child
 		errs = append(errs, v2.toFieldSet()...)
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
index d63a97fe2..9be902828 100644
--- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go
@@ -17,8 +17,6 @@ limitations under the License.
 package typed
 
 import (
-	"fmt"
-	"strings"
 	"sync"
 
 	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
@@ -26,16 +24,24 @@ import (
 	"sigs.k8s.io/structured-merge-diff/v4/value"
 )
 
+// ValidationOptions is the list of all the options available when running the validation.
+type ValidationOptions int
+
+const (
+	// AllowDuplicates means that sets and associative lists can have duplicate similar items.
+	AllowDuplicates ValidationOptions = iota
+)
+
 // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have
 // type 'typeName' in the schema. An error is returned if v doesn't conform
 // to the schema.
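Editor's note: AllowDuplicates threads from these parser entry points through AsTyped into the validator (validate.go hunk near the end of this patch). A sketch of the knob at the surface; the parser variable, schema, and type name are assumptions:

```go
tv, err := parser.Type("io.example.MyType").FromYAML(doc)
if err != nil {
	// Strict by default: duplicate keys in a set or associative list
	// fail with "duplicate entries for key ...". Opt in to tolerate
	// them instead of failing:
	tv, err = parser.Type("io.example.MyType").FromYAML(doc, typed.AllowDuplicates)
}
```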
-func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -45,6 +51,10 @@ func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedVal // conforms to the schema, for cases where that has already been checked or // where you're going to call a method that validates as a side-effect (like // ToFieldSet). +// +// Deprecated: This function was initially created because validation +// was expensive. Now that this has been solved, objects should always +// be created as validated, using `AsTyped`. func AsTypedUnvalidated(v value.Value, s *schema.Schema, typeRef schema.TypeRef) *TypedValue { tv := &TypedValue{ value: v, @@ -77,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -113,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -120,33 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.Equals(w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
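Editor's note: Compare now recycles its walker through a sync.Pool, zeroing every field before returning it so pooled walkers don't pin object references between calls. The same idiom in a self-contained form:

```go
package example

import "sync"

type walker struct {
	buf   []byte
	depth int
}

var pool = sync.Pool{
	New: func() interface{} { return &walker{} },
}

func withWalker(fn func(*walker)) {
	w := pool.Get().(*walker)
	defer func() {
		// Clear pointer-bearing fields before Put, as Compare does,
		// so the pool doesn't keep large allocations alive.
		w.buf = nil
		w.depth = 0
		pool.Put(w)
	}()
	fn(w)
}
```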
@@ -161,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -273,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae..000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if 
f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. 
- if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. - u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index 378d30219..652e24c81 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -102,6 +106,12 @@ func validateScalar(t *schema.Scalar, v value.Value, prefix string) (errs Valida if !v.IsBool() { return errorf("%vexpected boolean, got %v", prefix, v) } + case schema.Untyped: + if !v.IsFloat() && !v.IsInt() && !v.IsString() && !v.IsBool() { + return errorf("%vexpected any scalar, got %v", prefix, v) + } + default: + return errorf("%vunexpected scalar type in schema: %v", prefix, *t) } return nil } @@ -123,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -131,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) 
} observedKeys.Insert(pe) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go index dc8b8c720..c38402b99 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapreflect.go @@ -136,7 +136,7 @@ func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { if !ok { return false } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + return EqualsUsing(a, vr.mustReuse(lhsVal, entry, nil, nil), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go index d8e208628..c3ae00b18 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/mapunstructured.go @@ -88,12 +88,12 @@ func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } @@ -168,12 +168,12 @@ func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { } vv := a.allocValueUnstructured() defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { + return other.IterateUsing(a, func(key string, value Value) bool { lhsVal, ok := m[key] if !ok { return false } - return Equals(vv.reuse(lhsVal), value) + return EqualsUsing(a, vv.reuse(lhsVal), value) }) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index a5a467c0f..f0d58d42c 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -154,7 +154,9 @@ func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fi if field.Type.Kind() == reflect.Ptr { e = field.Type.Elem() } - buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + if e.Kind() == reflect.Struct { + buildStructCacheEntry(e, infos, append(fieldPath, field.Index)) + } continue } info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
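Editor's note: the last three hunks are allocator-plumbing fixes. Nested map equality previously fell back to value.Equals, which allocates fresh scratch space, even when the caller already held an allocator; and buildStructCacheEntry now recurses only into inlined fields that are actually structs. The equality pattern these changes standardize on:

```go
alloc := value.NewFreelistAllocator()
a := value.NewValueInterface(map[string]interface{}{"x": []interface{}{1, 2}})
b := value.NewValueInterface(map[string]interface{}{"x": []interface{}{1, 2}})

// EqualsUsing threads alloc through every nested map/list visited,
// instead of each level allocating on its own via plain value.Equals.
same := value.EqualsUsing(alloc, a, b) // true
_ = same
```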