diff --git a/go.mod b/go.mod
index be782a3f1..1e1c0524a 100644
--- a/go.mod
+++ b/go.mod
@@ -13,10 +13,10 @@ require (
github.com/pkg/errors v0.9.1
github.com/spf13/cobra v1.8.0
github.com/spf13/pflag v1.0.5
- go.bytebuilders.dev/audit v0.0.38
- go.bytebuilders.dev/license-proxyserver v0.0.19
- go.bytebuilders.dev/license-verifier v0.14.4
- go.bytebuilders.dev/license-verifier/kubernetes v0.14.4
+ go.bytebuilders.dev/audit v0.0.41
+ go.bytebuilders.dev/license-proxyserver v0.0.20
+ go.bytebuilders.dev/license-verifier v0.14.6
+ go.bytebuilders.dev/license-verifier/kubernetes v0.14.6
golang.org/x/text v0.21.0
gomodules.xyz/blobfs v0.1.14
gomodules.xyz/cert v1.6.0
@@ -47,7 +47,7 @@ require (
kmodules.xyz/prober v0.29.0
kmodules.xyz/webhook-runtime v0.29.1
sigs.k8s.io/controller-runtime v0.18.4
- stash.appscode.dev/apimachinery v0.38.1-0.20250114050236-cca8469a4c04
+ stash.appscode.dev/apimachinery v0.39.0
)
require (
@@ -62,7 +62,7 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.2.1 // indirect
+ github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.2.1 // indirect
@@ -87,8 +87,8 @@ require (
github.com/docker/docker v25.0.6+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/emicklei/go-restful/v3 v3.11.0 // indirect
- github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+ github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+ github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -98,9 +98,9 @@ require (
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.20.0 // indirect
- github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.4 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
@@ -122,18 +122,17 @@ require (
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
- github.com/huandu/xstrings v1.4.0 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.2 // indirect
+ github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -145,25 +144,25 @@ require (
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
- github.com/nats-io/nats.go v1.36.0 // indirect
- github.com/nats-io/nkeys v0.4.7 // indirect
+ github.com/nats-io/nats.go v1.39.0 // indirect
+ github.com/nats-io/nkeys v0.4.9 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/ncw/swift v1.0.49 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/prometheus/client_golang v1.18.0 // indirect
- github.com/prometheus/client_model v0.5.0 // indirect
- github.com/prometheus/common v0.45.0 // indirect
- github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/client_model v0.6.1 // indirect
+ github.com/prometheus/common v0.46.0 // indirect
+ github.com/prometheus/procfs v0.15.0 // indirect
github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 // indirect
github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe // indirect
github.com/rancher/wrangler/v3 v3.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
- github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/spf13/cast v1.5.0 // indirect
+ github.com/spf13/cast v1.7.0 // indirect
github.com/stoewer/go-strcase v1.2.0 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
@@ -188,7 +187,7 @@ require (
go.uber.org/zap v1.27.0 // indirect
gocloud.dev v0.26.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
- golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
+ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/sync v0.10.0 // indirect
@@ -210,7 +209,7 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
google.golang.org/grpc v1.62.1 // indirect
- google.golang.org/protobuf v1.33.0 // indirect
+ google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
@@ -223,8 +222,9 @@ require (
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
kmodules.xyz/apiversion v0.2.0 // indirect
kmodules.xyz/go-containerregistry v0.0.12 // indirect
- kmodules.xyz/resource-metadata v0.18.12 // indirect
- kmodules.xyz/resource-metrics v0.30.2 // indirect
+ kmodules.xyz/resource-metadata v0.24.3 // indirect
+ kmodules.xyz/resource-metrics v0.30.5 // indirect
+ moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
sigs.k8s.io/cli-utils v0.35.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
diff --git a/go.sum b/go.sum
index c5d00f3f3..271b3a80b 100644
--- a/go.sum
+++ b/go.sum
@@ -138,8 +138,8 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbP
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -273,7 +273,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -301,8 +300,8 @@ github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryef
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
-github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
+github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -314,8 +313,8 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
-github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -325,8 +324,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -358,14 +357,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
@@ -551,8 +548,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
-github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
@@ -624,8 +621,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b h1:pJA5vV5MDtLX3KP7tGuT05QTquydQyxNAy0R75chrDY=
@@ -673,8 +670,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -712,10 +707,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
-github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
-github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
-github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
+github.com/nats-io/nats.go v1.39.0 h1:2/yg2JQjiYYKLwDuBzV0FbB2sIV+eFNkEevlRi4n9lI=
+github.com/nats-io/nats.go v1.39.0/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
+github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
+github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
@@ -738,6 +733,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
+github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -754,16 +750,16 @@ github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlk
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
+github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 h1:AlRMRs5mHJcdiK83KKJyFVeybPMZ7dOUzC0l3k9aUa8=
github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9/go.mod h1:dyjfXBsNiroPWOdUZe7diUOUSLf6HQ/r2kEpwH/8zas=
@@ -774,8 +770,8 @@ github.com/rancher/wrangler/v3 v3.0.0/go.mod h1:Dfckuuq7MJk2JWVBDywRlZXMxEyPxHy4
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
@@ -785,12 +781,13 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
-github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
@@ -806,8 +803,8 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
-github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
+github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
@@ -838,6 +835,7 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
@@ -869,14 +867,14 @@ github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-go.bytebuilders.dev/audit v0.0.38 h1:4DEYVfBNeAfI9rctVWmpDOvEExh3gIuPsXVAg4Z3FWA=
-go.bytebuilders.dev/audit v0.0.38/go.mod h1:6RC3gMMFpAzTzGJuJg8Ghgvebde4vvXkmDtUo11ypQw=
-go.bytebuilders.dev/license-proxyserver v0.0.19 h1:mY7zPDN0JCw2a1UajOuQUQKQKjjm5KBx2CbkT/+a1N8=
-go.bytebuilders.dev/license-proxyserver v0.0.19/go.mod h1:B3Ig2Fo1qUollSV9GgfyFK8tXBI0RmUSpP1KFMZ2N7Q=
-go.bytebuilders.dev/license-verifier v0.14.4 h1:JwTGQFew4nudwv8Pk3BdfQRts8KfgUQ5xhu138w1wt4=
-go.bytebuilders.dev/license-verifier v0.14.4/go.mod h1:LqWXJKee5ofDcCYM6T5WilYlUc4NlKeZz58tHwO8GEs=
-go.bytebuilders.dev/license-verifier/kubernetes v0.14.4 h1:NeHq6SuVhRIVaMW2kSXdr8DcuUOg2jVL9rsODIQp9Fc=
-go.bytebuilders.dev/license-verifier/kubernetes v0.14.4/go.mod h1:1C7SaOJShC60mIXP1hXBaDWGpb0hrHQ4p4nUEvI6YQY=
+go.bytebuilders.dev/audit v0.0.41 h1:zjTOCqxi95ZBzw65+i9lT2+RVABAL4evMZTMJlfPzMk=
+go.bytebuilders.dev/audit v0.0.41/go.mod h1:7sv+kb79UJ49GgH83218Tq9jlJcFsDTOjRSHF+2my/U=
+go.bytebuilders.dev/license-proxyserver v0.0.20 h1:gzRSwUmX/LSwPVE6T9oy5RLIutU1EeI7hmS+QGsYBY4=
+go.bytebuilders.dev/license-proxyserver v0.0.20/go.mod h1:2PJmjMCXncVyeP3fIVQ+hwZnuhmWSTmbcuEMBrFKIac=
+go.bytebuilders.dev/license-verifier v0.14.6 h1:0iHYGURUbx8toiXvFKftn/qMpeHzqHbAgEnEzOCNLvo=
+go.bytebuilders.dev/license-verifier v0.14.6/go.mod h1:LqWXJKee5ofDcCYM6T5WilYlUc4NlKeZz58tHwO8GEs=
+go.bytebuilders.dev/license-verifier/kubernetes v0.14.6 h1:NxmASX0A3lu+ABd4zuT5Ib+I63y3j5uJxmlUFEGxqWg=
+go.bytebuilders.dev/license-verifier/kubernetes v0.14.6/go.mod h1:N5QxsJF4EGLduOsTsW9gGfRuuMvN33T8pg5Y9NfKzuo=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
@@ -981,8 +979,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
-golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1262,6 +1260,7 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -1509,8 +1508,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1593,12 +1592,14 @@ kmodules.xyz/openshift v0.29.0 h1:8PXjeQ+usUGkLnYUMSbZrMg+i1OIYBe9UWOeBf2FRzU=
kmodules.xyz/openshift v0.29.0/go.mod h1:neEinR+BcvP4vKKCkh9HpRK9e3c+upSst9E7rskZVuQ=
kmodules.xyz/prober v0.29.0 h1:Ex7m4F9rH7uWNNJlLgP63ROOM+nUATJkC2L5OQ7nwMg=
kmodules.xyz/prober v0.29.0/go.mod h1:UtK+HKyI1lFLEKX+HFLyOCVju6TO93zv3kwGpzqmKOo=
-kmodules.xyz/resource-metadata v0.18.12 h1:wHRYsvUXrX/620K0xXZlxnXJunZtHMuJbqDFZxSUs5E=
-kmodules.xyz/resource-metadata v0.18.12/go.mod h1:VvUjfIzmM08SZ9ssZKhduzSrggKjY93ES2Bk+/m04hs=
-kmodules.xyz/resource-metrics v0.30.2 h1:EGJapJa7Sh2ePc/WyHziLVh9xtuh4vWPBoSGxVZ8uC8=
-kmodules.xyz/resource-metrics v0.30.2/go.mod h1:UYcQQLN+3o8rNPQJwJa2D9bt5ihJCeo5bCDuQ4O3MPY=
+kmodules.xyz/resource-metadata v0.24.3 h1:yGXm6G1YIXru3mgDqDWsCPhGfotT3xBTKy+jd0bpF+E=
+kmodules.xyz/resource-metadata v0.24.3/go.mod h1:rPUZSMR0e1Vi+gONQ2ZhOFW+GvUeK+1AI7h9fzTZoKI=
+kmodules.xyz/resource-metrics v0.30.5 h1:ZhpGeR9DCz1HTrKUg/mWhr95wlFzCPRdgVAqwaggy1o=
+kmodules.xyz/resource-metrics v0.30.5/go.mod h1:w9+rz7/s/kGP1GWzYSuRdCn+l7EwpesmESSEHkLBnIQ=
kmodules.xyz/webhook-runtime v0.29.1 h1:SQ8NvwJpxv5CUuXIubSg9g0Bk8BBnj4dhCWEk6Ou8g8=
kmodules.xyz/webhook-runtime v0.29.1/go.mod h1:WVgNO4NqFdYwYZ82L6QIycN1Ax47y+yXi7XIHekYylo=
+moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7 h1:NykkTlRB+X40z86cLHdEmuoTxhNKhQebLT379b1EumA=
+moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -1617,7 +1618,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-stash.appscode.dev/apimachinery v0.38.1-0.20250114050236-cca8469a4c04 h1:NtQeear5dlS9PM6LoPVlfqRflavGvODoxa65RnI+w5Y=
-stash.appscode.dev/apimachinery v0.38.1-0.20250114050236-cca8469a4c04/go.mod h1:HoMcNxSg7TUHEhbHE+JvdhICrXoEKRvfLuFBKAM40ng=
+stash.appscode.dev/apimachinery v0.39.0 h1:jUYOFmpFgKhtsEbtmamlSgQTQ01boUzkv+25iAXzK5I=
+stash.appscode.dev/apimachinery v0.39.0/go.mod h1:6z4oQvMMBPyybs8uajBsrgO2ZK98ulB0CKGwfeuav+4=
x-helm.dev/apimachinery v0.0.16 h1:Eb160xcdH9fMVHak5QSWYWxoaReytch+A7kk25QWjx0=
x-helm.dev/apimachinery v0.0.16/go.mod h1:05brgFw5oWOX7OTXT090SQojqXjbttqWfqoJo+ejBU4=
diff --git a/pkg/controller/config.go b/pkg/controller/config.go
index 52c74c60c..2fe082ab2 100644
--- a/pkg/controller/config.go
+++ b/pkg/controller/config.go
@@ -113,13 +113,12 @@ func (c *Config) New() (*StashController, error) {
if err != nil {
return nil, fmt.Errorf("failed to extract cluster metadata, reason: %v", err)
}
+ nc := auditlib.NewNatsClient(c.ClientConfig, cmeta.UID, c.LicenseFile)
fn := auditlib.BillingEventCreator{
Mapper: mapper,
ClusterMetadata: cmeta,
}
- auditor = auditlib.NewResilientEventPublisher(func() (*auditlib.NatsConfig, auditlib.LicenseIDGetter, error) {
- return auditlib.NewNatsConfig(c.ClientConfig, cmeta.UID, c.LicenseFile)
- }, mapper, fn.CreateEvent)
+ auditor = auditlib.NewEventPublisher(nc, mapper, fn.CreateEvent)
err = auditor.SetupSiteInfoPublisher(c.ClientConfig, c.KubeClient, informerFactory)
if err != nil {
return nil, fmt.Errorf("failed to setup site info publisher, reason: %v", err)
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
index f12626423..f95a504fe 100644
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
## 3.2.0 (2022-11-28)
### Added
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
index 0e7b5c713..9ca87a2c7 100644
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -19,6 +19,7 @@ test-cover:
.PHONY: fuzz
fuzz:
@echo "==> Running Fuzz Tests"
+ go env GOCACHE
go test -fuzz=FuzzNewVersion -fuzztime=15s .
go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
go test -fuzz=FuzzNewConstraint -fuzztime=15s .
@@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
index eab8cac3b..ed5693608 100644
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[](https://goreportcard.com/report/github.com/Masterminds/semver)
-If you are looking for a command line tool for version comparisons please see
-[vert](https://github.com/Masterminds/vert) which uses this library.
-
## Package Versions
-Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
There are three major versions of the `semver` package.
@@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.
1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
+ and others it will follow the specification and always include pre-releases
within the comparison. It will provide an answer that is valid with the
comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
ranges does not include one. If you want to have it include pre-releases a
simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parsable.
}
-// Check if the version meets the constraints. The a variable will be true.
+// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```
@@ -137,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions
Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
+order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.
-According to the Semantic Version specification prereleases may not be
+According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@@ -171,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
### Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
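Review note: a small sketch of the pre-release rule described in the README hunk above, against the Masterminds/semver v3 constraint API (versions chosen for illustration only):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.3.0-beta.1")

	stable, _ := semver.NewConstraint(">=1.2.3")    // no pre-release comparator: pre-releases are skipped
	withPre, _ := semver.NewConstraint(">=1.2.3-0") // trailing -0 opts pre-releases in

	fmt.Println(stable.Check(v))  // false
	fmt.Println(withPre.Check(v)) // true
}
```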
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index 7c4bed334..ff499fb66 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) {
original: v,
}
- // check for prerelease or build metadata
- var extra []string
- if strings.ContainsAny(parts[2], "-+") {
- // Start with the build metadata first as it needs to be on the right
- extra = strings.SplitN(parts[2], "+", 2)
- if len(extra) > 1 {
- // build metadata found
- sv.metadata = extra[1]
- parts[2] = extra[0]
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
}
+ }
- extra = strings.SplitN(parts[2], "-", 2)
- if len(extra) > 1 {
- // prerelease found
- sv.pre = extra[1]
- parts[2] = extra[0]
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
}
}
@@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) {
}
}
- // Extract the major, minor, and patch elements onto the returned Version
+ // Extract major, minor, and patch
var err error
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
@@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) {
return nil, err
}
- // No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" {
- return sv, nil
- }
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
return sv, nil
}
@@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0
}
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
// GreaterThan tests if one version is greater than another one.
func (v *Version) GreaterThan(o *Version) bool {
return v.Compare(o) > 0
}
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
return v.Compare(o) == 0
}
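Review note: illustrating the API additions in the hunk above (`LessThanEqual`, `GreaterThanEqual`, nil-safe `Equal`); the versions are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	a := semver.MustParse("1.2.3")
	b := semver.MustParse("1.2.3+build.7") // build metadata is ignored by Compare

	fmt.Println(a.LessThanEqual(b))    // true
	fmt.Println(a.GreaterThanEqual(b)) // true

	// Equal no longer assumes both operands are non-nil.
	var none *semver.Version
	fmt.Println(none.Equal(nil)) // true
	fmt.Println(a.Equal(none))   // false
}
```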
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 5edd5a7ca..92b78048e 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,6 +1,24 @@
# Change history of go-restful
-## [v3.11.0] - 2023-08-19
+
+## [v3.12.1] - 2024-05-28
+
+- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)
+
+## [v3.12.0] - 2024-03-11
+
+- add Flush method #529 (#538)
+- fix: Improper handling of empty POST requests (#543)
+
+## [v3.11.3] - 2024-01-09
+
+- better not have 2 tags on one commit
+
+## [v3.11.1, v3.11.2] - 2024-01-09
+
+- fix by restoring custom JSON handler functions (Mike Beaumont #540)
+
+## [v3.11.0] - 2023-08-19
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index e3e30080e..7234604e4 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -2,7 +2,6 @@ go-restful
==========
package for building REST-style Web Services using Google Go
-[](https://travis-ci.org/emicklei/go-restful)
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
[](https://pkg.go.dev/github.com/emicklei/go-restful)
[](https://codecov.io/gh/emicklei/go-restful)
@@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package.
- Trace logging
- Compression
- Encoders for other serializers
-- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
-- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
+- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
## Resources
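Review note: the README now points only at the `TrimRightSlashEnabled` package variable (the jsoniter build tag is gone; see the deleted json.go/jsoniter.go below). A hypothetical snippet of that switch; paths and the handler are illustrative:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Default is true, which keeps the <= v3.9.0 behaviour of trimming a
	// trailing slash before matching. Set to false for strict matching.
	restful.TrimRightSlashEnabled = false

	ws := new(restful.WebService)
	ws.Path("/users")
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte(req.PathParameter("id")))
	}))

	restful.Add(ws)
	http.ListenAndServe(":8080", restful.DefaultContainer)
}
```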
diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go
index 1ff239f99..80adf55fd 100644
--- a/vendor/github.com/emicklei/go-restful/v3/compress.go
+++ b/vendor/github.com/emicklei/go-restful/v3/compress.go
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
+// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
+func (c *CompressingResponseWriter) Flush() {
+ flusher, ok := c.writer.(http.Flusher)
+ if !ok {
+ // writer doesn't support http.Flusher interface
+ return
+ }
+ flusher.Flush()
+}
+
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
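Review note: with the `Flush` passthrough added above, handlers that stream while compression is negotiated can keep using the usual `http.Flusher` assertion. A hedged sketch (handler body is illustrative):

```go
package example

import (
	"fmt"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

// stream is an illustrative handler; resp.ResponseWriter may be a
// *restful.CompressingResponseWriter when gzip/zlib encoding was negotiated,
// and that writer now forwards Flush to the underlying ResponseWriter.
func stream(req *restful.Request, resp *restful.Response) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(resp, "chunk %d\n", i)
		if f, ok := resp.ResponseWriter.(http.Flusher); ok {
			f.Flush() // forwarded to the underlying writer; no-op if unsupported
		}
	}
}
```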
diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go
index ba1fc5d5f..6fd2bcd5a 100644
--- a/vendor/github.com/emicklei/go-restful/v3/curly.go
+++ b/vendor/github.com/emicklei/go-restful/v3/curly.go
@@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute(
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
candidates := make(sortableCurlyRoutes, 0, 8)
- for _, each := range ws.routes {
- matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb)
+ for _, eachRoute := range ws.routes {
+ matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb)
if matches {
- candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+ candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(candidates)
@@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin
return false, 0, 0
}
requestToken := requestTokens[i]
- if routeHasCustomVerb && hasCustomVerb(routeToken){
+ if routeHasCustomVerb && hasCustomVerb(routeToken) {
if !isMatchCustomVerb(routeToken, requestToken) {
return false, 0, 0
}
@@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques
// detectWebService returns the best matching webService given the list of path tokens.
// see also computeWebserviceScore
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
- var best *WebService
+ var bestWs *WebService
score := -1
- for _, each := range webServices {
- matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+ for _, eachWS := range webServices {
+ matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens)
if matches && (eachScore > score) {
- best = each
+ bestWs = eachWS
score = eachScore
}
}
- return best
+ return bestWs
}
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
- if len(tokens) > len(requestTokens) {
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) {
+ if len(routeTokens) > len(requestTokens) {
return false, 0
}
score := 0
- for i := 0; i < len(tokens); i++ {
- each := requestTokens[i]
- other := tokens[i]
- if len(each) == 0 && len(other) == 0 {
+ for i := 0; i < len(routeTokens); i++ {
+ eachRequestToken := requestTokens[i]
+ eachRouteToken := routeTokens[i]
+ if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 {
score++
continue
}
- if len(other) > 0 && strings.HasPrefix(other, "{") {
+ if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") {
// no empty match
- if len(each) == 0 {
+ if len(eachRequestToken) == 0 {
return false, score
}
- score += 1
+ score++
+
+ if colon := strings.Index(eachRouteToken, ":"); colon != -1 {
+ // match by regex
+ matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken)
+ if matchesToken {
+ score++ // extra score for regex match
+ }
+ }
} else {
// not a parameter
- if each != other {
+ if eachRequestToken != eachRouteToken {
return false, score
}
- score += (len(tokens) - i) * 10 //fuzzy
+ score += (len(routeTokens) - i) * 10 //fuzzy
}
}
return true, score
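Review note: the extra score for a regex match above is the fix for #549 (misrouting when several WebServices overlap and one uses a regex path parameter). A hypothetical reproduction; the paths and handlers are illustrative, not taken from this repo:

```go
package main

import (
	"io"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	numeric := new(restful.WebService)
	numeric.Path("/orders/{id:[0-9]+}")
	numeric.Route(numeric.GET("").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp, "by id: "+req.PathParameter("id"))
	}))

	named := new(restful.WebService)
	named.Path("/orders/{name}")
	named.Route(named.GET("").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp, "by name: "+req.PathParameter("name"))
	}))

	restful.Add(numeric)
	restful.Add(named)

	// With the scoring change, GET /orders/123 now selects the regex-backed
	// service instead of falling through to the plain {name} service.
	http.ListenAndServe(":8080", restful.DefaultContainer)
}
```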
diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
index 66dfc824f..9808752ac 100644
--- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
+++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go
@@ -5,11 +5,18 @@ package restful
// that can be found in the LICENSE file.
import (
+ "encoding/json"
"encoding/xml"
"strings"
"sync"
)
+var (
+ MarshalIndent = json.MarshalIndent
+ NewDecoder = json.NewDecoder
+ NewEncoder = json.NewEncoder
+)
+
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
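Review note: the jsoniter build tag is gone (json.go and jsoniter.go are deleted below) and the JSON hooks now live here as plain `encoding/json` aliases. Anyone who still wants an alternative JSON codec can register a custom accessor instead; a hedged sketch (type name is made up, error handling and content-type headers omitted):

```go
package example

import (
	restful "github.com/emicklei/go-restful/v3"
	jsoniter "github.com/json-iterator/go"
)

// fastJSON is an illustrative EntityReaderWriter backed by jsoniter,
// standing in for what `go build -tags=jsoniter` used to switch on.
type fastJSON struct{}

func (fastJSON) Read(req *restful.Request, v interface{}) error {
	return jsoniter.ConfigCompatibleWithStandardLibrary.NewDecoder(req.Request.Body).Decode(v)
}

func (fastJSON) Write(resp *restful.Response, status int, v interface{}) error {
	resp.WriteHeader(status)
	return jsoniter.ConfigCompatibleWithStandardLibrary.NewEncoder(resp).Encode(v)
}

func init() {
	restful.RegisterEntityAccessor(restful.MIME_JSON, fastJSON{})
}
```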
diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go
deleted file mode 100644
index 871165166..000000000
--- a/vendor/github.com/emicklei/go-restful/v3/json.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !jsoniter
-
-package restful
-
-import "encoding/json"
-
-var (
- MarshalIndent = json.MarshalIndent
- NewDecoder = json.NewDecoder
- NewEncoder = json.NewEncoder
-)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go
deleted file mode 100644
index 11b8f8ae7..000000000
--- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build jsoniter
-
-package restful
-
-import "github.com/json-iterator/go"
-
-var (
- json = jsoniter.ConfigCompatibleWithStandardLibrary
- MarshalIndent = json.MarshalIndent
- NewDecoder = json.NewDecoder
- NewEncoder = json.NewEncoder
-)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index 07a0c91e9..a9b3faaa8 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
if (method == http.MethodPost ||
method == http.MethodPut ||
- method == http.MethodPatch) && length == "" {
+ method == http.MethodPatch) && (length == "" || length == "0") {
return nil, NewError(
http.StatusUnsupportedMediaType,
fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
new file mode 100644
index 000000000..22f8d21cc
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml
@@ -0,0 +1,61 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ golint:
+ min-confidence: 0
+ gocyclo:
+ min-complexity: 45
+ maligned:
+ suggest-new: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - unparam
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
+ - wrapcheck
+ - testpackage
+ - nlreturn
+ - gomnd
+ - exhaustivestruct
+ - goerr113
+ - errorlint
+ - nestif
+ - godot
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
+ - ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
+ - nosnakecase
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 813788aff..0108f1d57 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -1,6 +1,10 @@
-# gojsonpointer [](https://travis-ci.org/go-openapi/jsonpointer) [](https://codecov.io/gh/go-openapi/jsonpointer) [](https://slackin.goswagger.io)
+# gojsonpointer [](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonpointer)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonpointer)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonpointer)
-[](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonpointer)
An implementation of JSON Pointer - Go language
## Status
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index de60dc7dd..d970c7cf4 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -110,19 +110,39 @@ func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
+func isNil(input any) bool {
+ if input == nil {
+ return true
+ }
+
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
+ if isNil(node) {
+ return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken)
+ }
- if rValue.Type().Implements(jsonPointableType) {
- r, err := node.(JSONPointable).JSONLookup(decodedToken)
+ switch typed := node.(type) {
+ case JSONPointable:
+ r, err := typed.JSONLookup(decodedToken)
if err != nil {
return nil, kind, err
}
return r, kind, nil
+ case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect
+ return getSingleImpl(*typed, decodedToken, nameProvider)
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -170,7 +190,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP
return node.(JSONSetable).JSONSet(decodedToken, data)
}
- switch rValue.Kind() {
+ switch rValue.Kind() { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -231,8 +251,7 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K
if err != nil {
return nil, knd, err
}
- node, kind = r, knd
-
+ node = r
}
rValue := reflect.ValueOf(node)
@@ -245,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
- return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+ return errors.New("only structs, pointers, maps and slices are supported for setting values")
}
if nameProvider == nil {
@@ -284,7 +303,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
continue
}
- switch kind {
+ switch kind { //nolint:exhaustive
case reflect.Struct:
nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
if !ok {
@@ -405,11 +424,11 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
case json.Delim:
switch tk {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
}
@@ -435,20 +454,21 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
if err != nil {
return 0, err
}
- switch tk := tk.(type) {
- case json.Delim:
- switch tk {
+
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return 0, err
}
}
}
}
+
if !dec.More() {
return 0, fmt.Errorf("token reference %q not found", decodedToken)
}
@@ -456,27 +476,27 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
}
// drainSingle drains a single level of object or array.
-// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed.
+// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed.
func drainSingle(dec *json.Decoder) error {
for dec.More() {
tk, err := dec.Token()
if err != nil {
return err
}
- switch tk := tk.(type) {
- case json.Delim:
- switch tk {
+ if delim, isDelim := tk.(json.Delim); isDelim {
+ switch delim {
case '{':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return err
}
case '[':
- if err := drainSingle(dec); err != nil {
+ if err = drainSingle(dec); err != nil {
return err
}
}
}
}
+
// Consumes the ending delim
if _, err := dec.Token(); err != nil {
return err
@@ -498,14 +518,14 @@ const (
// Unescape unescapes a json pointer reference token string to the original representation
func Unescape(token string) string {
- step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
- step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+ step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1)
+ step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0)
return step2
}
// Escape escapes a pointer reference token string
func Escape(token string) string {
- step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
- step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+ step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0)
+ step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1)
return step2
}
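
The `isNil` helper introduced above guards `getSingleImpl` against typed nils: an interface holding a nil `*T`, map, slice, or channel compares unequal to plain `nil`, so a reflection check is needed. A minimal standalone sketch of that distinction, separate from the vendored code:

```go
package main

import (
	"fmt"
	"reflect"
)

// isNil mirrors the vendored helper: a bare `input == nil` misses typed nils.
func isNil(input any) bool {
	if input == nil {
		return true
	}
	switch reflect.TypeOf(input).Kind() {
	case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
		return reflect.ValueOf(input).IsNil()
	default:
		return false
	}
}

func main() {
	var p *int
	var in any = p
	fmt.Println(in == nil) // false: the interface holds a typed nil *int
	fmt.Println(isNil(in)) // true: reflection sees the nil pointer inside
}
```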
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
index 013fc1943..22f8d21cc 100644
--- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml
+++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -1,50 +1,61 @@
linters-settings:
govet:
check-shadowing: true
+ golint:
+ min-confidence: 0
gocyclo:
- min-complexity: 30
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 2
- min-occurrences: 4
- paralleltest:
- ignore-missing: true
+ min-occurrences: 3
+
linters:
enable-all: true
disable:
- maligned
+ - unparam
- lll
+ - gochecknoinits
- gochecknoglobals
+ - funlen
- godox
- gocognit
- whitespace
- wsl
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - scopelint
- wrapcheck
- - exhaustivestruct
- - exhaustive
- - nlreturn
- testpackage
- - gci
- - gofumpt
- - goerr113
+ - nlreturn
- gomnd
- - tparallel
+ - exhaustivestruct
+ - goerr113
+ - errorlint
- nestif
- godot
- - errorlint
- - varcheck
- - interfacer
- - deadcode
- - golint
+ - gofumpt
+ - paralleltest
+ - tparallel
+ - thelper
- ifshort
+ - exhaustruct
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
+ - nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
- structcheck
+ - golint
- nosnakecase
- - varnamelen
- - exhaustruct
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
index b94753aa5..c7fc2049c 100644
--- a/vendor/github.com/go-openapi/jsonreference/README.md
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -1,15 +1,19 @@
-# gojsonreference [](https://travis-ci.org/go-openapi/jsonreference) [](https://codecov.io/gh/go-openapi/jsonreference) [](https://slackin.goswagger.io)
+# gojsonreference [](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonreference)
+
+[](https://slackin.goswagger.io)
+[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
+[](https://pkg.go.dev/github.com/go-openapi/jsonreference)
+[](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
-[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [](http://godoc.org/github.com/go-openapi/jsonreference)
An implementation of JSON Reference - Go language
## Status
Feature complete. Stable API
## Dependencies
-https://github.com/go-openapi/jsonpointer
+* https://github.com/go-openapi/jsonpointer
## References
-http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
-http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
+* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
index d69b53acc..c4b1b64f0 100644
--- a/vendor/github.com/go-openapi/swag/.gitignore
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -2,3 +2,4 @@ secrets.yml
vendor
Godeps
.idea
+*.out
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
index bf503e400..80e2be004 100644
--- a/vendor/github.com/go-openapi/swag/.golangci.yml
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -4,14 +4,14 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
- min-complexity: 25
+ min-complexity: 45
maligned:
suggest-new: true
dupl:
- threshold: 100
+ threshold: 200
goconst:
min-len: 3
- min-occurrences: 2
+ min-occurrences: 3
linters:
enable-all: true
@@ -20,35 +20,41 @@ linters:
- lll
- gochecknoinits
- gochecknoglobals
- - nlreturn
- - testpackage
+ - funlen
+ - godox
+ - gocognit
+ - whitespace
+ - wsl
- wrapcheck
+ - testpackage
+ - nlreturn
- gomnd
- - exhaustive
- exhaustivestruct
- goerr113
- - wsl
- - whitespace
- - gofumpt
- - godot
+ - errorlint
- nestif
- - godox
- - funlen
- - gci
- - gocognit
+ - godot
+ - gofumpt
- paralleltest
+ - tparallel
- thelper
- ifshort
- - gomoddirectives
- - cyclop
- - forcetypeassert
- - ireturn
- - tagliatelle
- - varnamelen
- - goimports
- - tenv
- - golint
- exhaustruct
- - nilnil
+ - varnamelen
+ - gci
+ - depguard
+ - errchkjson
+ - inamedparam
- nonamedreturns
+ - musttag
+ - ireturn
+ - forcetypeassert
+ - cyclop
+ # deprecated linters
+ - deadcode
+ - interfacer
+ - scopelint
+ - varcheck
+ - structcheck
+ - golint
- nosnakecase
diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md
new file mode 100644
index 000000000..e7f28ed6b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md
@@ -0,0 +1,52 @@
+# Benchmarks
+
+## Name mangling utilities
+
+```bash
+go test -bench XXX -run XXX -benchtime 30s
+```
+
+### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op
+BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op
+BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op
+```
+
+### Benchmarks after PR #79
+
+~ x10 performance improvement and ~ 1/100 of the memory allocations.
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz
+BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op
+```
+
+```
+goos: linux
+goarch: amd64
+pkg: github.com/go-openapi/swag
+cpu: AMD Ryzen 7 5800X 8-Core Processor
+BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op
+BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op
+BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op
+BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op
+BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op
+```
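
The tables above are standard `go test -bench` output. A minimal benchmark sketch in the same shape, assuming the `github.com/go-openapi/swag` import path; the sub-benchmark name and input string are illustrative only:

```go
package swag_test

import (
	"testing"

	"github.com/go-openapi/swag"
)

// Run with: go test -bench ToXXXName -run XXX -benchtime 30s
func BenchmarkToXXXName(b *testing.B) {
	b.Run("ToGoName", func(b *testing.B) {
		b.ReportAllocs() // emits the B/op and allocs/op columns
		for i := 0; i < b.N; i++ {
			_ = swag.ToGoName("json_api_server_id")
		}
	})
}
```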
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
index 217f6fa50..a72922299 100644
--- a/vendor/github.com/go-openapi/swag/README.md
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -1,7 +1,8 @@
-# Swag [](https://travis-ci.org/go-openapi/swag) [](https://codecov.io/gh/go-openapi/swag) [](https://slackin.goswagger.io)
+# Swag [](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/swag)
+[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
-[](http://godoc.org/github.com/go-openapi/swag)
+[](https://pkg.go.dev/github.com/go-openapi/swag)
[](https://goreportcard.com/report/github.com/go-openapi/swag)
Contains a bunch of helper functions for go-openapi and go-swagger projects.
@@ -18,4 +19,5 @@ You may also use it standalone for your projects.
This repo has only few dependencies outside of the standard library:
-* YAML utilities depend on gopkg.in/yaml.v2
+* YAML utilities depend on `gopkg.in/yaml.v3`
+* `github.com/mailru/easyjson v0.7.7`
diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go
new file mode 100644
index 000000000..20a359bb6
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/initialism_index.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+ "sort"
+ "strings"
+ "sync"
+)
+
+var (
+ // commonInitialisms are common acronyms that are kept as whole uppercased words.
+ commonInitialisms *indexOfInitialisms
+
+ // initialisms is a slice of sorted initialisms
+ initialisms []string
+
+ // a copy of initialisms pre-baked as []rune
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune
+
+ isInitialism func(string) bool
+
+ maxAllocMatches int
+)
+
+func init() {
+ // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+ configuredInitialisms := map[string]bool{
+ "ACL": true,
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "GUID": true,
+ "HTML": true,
+ "HTTPS": true,
+ "HTTP": true,
+ "ID": true,
+ "IP": true,
+ "IPv4": true,
+ "IPv6": true,
+ "JSON": true,
+ "LHS": true,
+ "OAI": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SMTP": true,
+ "SQL": true,
+ "SSH": true,
+ "TCP": true,
+ "TLS": true,
+ "TTL": true,
+ "UDP": true,
+ "UI": true,
+ "UID": true,
+ "UUID": true,
+ "URI": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+ "XMPP": true,
+ "XSRF": true,
+ "XSS": true,
+ }
+
+ // a thread-safe index of initialisms
+ commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+ maxAllocMatches = maxAllocHeuristic(initialismsRunes)
+
+ // a test function
+ isInitialism = commonInitialisms.isInitialism
+}
+
+func asRunes(in []string) [][]rune {
+ out := make([][]rune, len(in))
+ for i, initialism := range in {
+ out[i] = []rune(initialism)
+ }
+
+ return out
+}
+
+func asUpperCased(in []string) [][]rune {
+ out := make([][]rune, len(in))
+
+ for i, initialism := range in {
+ out[i] = []rune(upper(trim(initialism)))
+ }
+
+ return out
+}
+
+func maxAllocHeuristic(in [][]rune) int {
+ heuristic := make(map[rune]int)
+ for _, initialism := range in {
+ heuristic[initialism[0]]++
+ }
+
+ var maxAlloc int
+ for _, val := range heuristic {
+ if val > maxAlloc {
+ maxAlloc = val
+ }
+ }
+
+ return maxAlloc
+}
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+ for _, word := range words {
+ // commonInitialisms[upper(word)] = true
+ commonInitialisms.add(upper(word))
+ }
+ // sort again
+ initialisms = commonInitialisms.sorted()
+ initialismsRunes = asRunes(initialisms)
+ initialismsUpperCased = asUpperCased(initialisms)
+}
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+ sortMutex *sync.Mutex
+ index *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+ return &indexOfInitialisms{
+ sortMutex: new(sync.Mutex),
+ index: new(sync.Map),
+ }
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ for k, v := range initial {
+ m.index.Store(k, v)
+ }
+ return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+ _, ok := m.index.Load(key)
+ return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+ m.index.Store(key, true)
+ return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+ m.sortMutex.Lock()
+ defer m.sortMutex.Unlock()
+ m.index.Range(func(key, _ interface{}) bool {
+ k := key.(string)
+ result = append(result, k)
+ return true
+ })
+ sort.Sort(sort.Reverse(byInitialism(result)))
+ return
+}
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+ return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+ if len(s[i]) != len(s[j]) {
+ return len(s[i]) < len(s[j])
+ }
+
+ return strings.Compare(s[i], s[j]) > 0
+}
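
The index above wraps `sync.Map` so initialisms can be added and looked up concurrently, with the sorted snapshot taken under a separate mutex. A simplified standalone sketch of that pattern (using plain lexicographic order rather than the package's `byInitialism` ordering):

```go
package main

import (
	"fmt"
	"sort"
	"sync"
)

type index struct {
	sortMu sync.Mutex // serializes snapshotting, like the vendored sortMutex
	m      sync.Map   // concurrent adds and lookups need no extra locking
}

func (ix *index) add(k string)      { ix.m.Store(k, true) }
func (ix *index) has(k string) bool { _, ok := ix.m.Load(k); return ok }

func (ix *index) sorted() []string {
	ix.sortMu.Lock()
	defer ix.sortMu.Unlock()
	var out []string
	ix.m.Range(func(key, _ any) bool {
		out = append(out, key.(string))
		return true
	})
	sort.Strings(out)
	return out
}

func main() {
	ix := &index{}
	for _, w := range []string{"HTTP", "ID", "API"} {
		ix.add(w)
	}
	fmt.Println(ix.has("API"), ix.sorted()) // true [API HTTP ID]
}
```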
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
index 00038c377..783442fdd 100644
--- a/vendor/github.com/go-openapi/swag/loading.go
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -21,6 +21,7 @@ import (
"net/http"
"net/url"
"os"
+ "path"
"path/filepath"
"runtime"
"strings"
@@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = ""
var LoadHTTPCustomHeaders = map[string]string{}
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
-func LoadFromFileOrHTTP(path string) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+func LoadFromFileOrHTTP(pth string) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
-func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
- return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path)
+func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) {
+ return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth)
}
-// LoadStrategy returns a loader function for a given path or uri
-func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
- if strings.HasPrefix(path, "http") {
+// LoadStrategy returns a loader function for a given path or URI.
+//
+// The load strategy returns the remote load for any path starting with `http`.
+// So this works for any URI with a scheme `http` or `https`.
+//
+// The fallback strategy is to call the local loader.
+//
+// The local loader takes a local file system path (absolute or relative) as argument,
+// or alternatively a `file://...` URI, **without host** (see also below for windows).
+//
+// There are a few liberalities, initially intended to be tolerant regarding the URI syntax,
+// especially on windows.
+//
+// Before the local loader is called, the given path is transformed:
+// - percent-encoded characters are unescaped
+// - simple paths (e.g. `./folder/file`) are passed as-is
+// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such as `folder/file` works too.
+//
+// For paths provided as URIs with the "file" scheme, please note that:
+// - `file://` is simply stripped.
+// This means that the host part of the URI is not parsed at all.
+// For example, `file:///folder/file" becomes "/folder/file`,
+// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems.
+// Similarly, `file://./folder/file` yields `./folder/file`.
+// - on windows, `file://...` can take a host so as to specify an UNC share location.
+//
+// Reminder about windows-specifics:
+// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported)
+// - `file:///c:/folder/file` becomes `C:\folder\file`
+// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file`
+func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+ if strings.HasPrefix(pth, "http") {
return remote
}
- return func(pth string) ([]byte, error) {
- upth, err := pathUnescape(pth)
+
+ return func(p string) ([]byte, error) {
+ upth, err := url.PathUnescape(p)
if err != nil {
return nil, err
}
- if strings.HasPrefix(pth, `file://`) {
- if runtime.GOOS == "windows" {
- // support for canonical file URIs on windows.
- // Zero tolerance here for dodgy URIs.
- u, _ := url.Parse(upth)
- if u.Host != "" {
- // assume UNC name (volume share)
- // file://host/share/folder\... ==> \\host\share\path\folder
- // NOTE: UNC port not yet supported
- upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`)
- } else {
- // file:///c:/folder/... ==> just remove the leading slash
- upth = strings.TrimPrefix(upth, `file:///`)
- }
- } else {
- upth = strings.TrimPrefix(upth, `file://`)
+ if !strings.HasPrefix(p, `file://`) {
+ // regular file path provided: just normalize slashes
+ return local(filepath.FromSlash(upth))
+ }
+
+ if runtime.GOOS != "windows" {
+ // crude processing: this leaves full URIs with a host with a (mostly) unexpected result
+ upth = strings.TrimPrefix(upth, `file://`)
+
+ return local(filepath.FromSlash(upth))
+ }
+
+ // windows-only pre-processing of file://... URIs
+
+ // support for canonical file URIs on windows.
+ u, err := url.Parse(filepath.ToSlash(upth))
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Host != "" {
+ // assume UNC name (volume share)
+ // NOTE: UNC port not yet supported
+
+ // when the "host" segment is a drive letter:
+ // file://C:/folder/... => C:\folder
+ upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`))
+ if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' {
+ // tolerance: if we have a leading dot, this can't be a host
+ // file://host/share/folder\... ==> \\host\share\path\folder
+ upth = "//" + upth
+ }
+ } else {
+ // no host, let's figure out if this is a drive letter
+ upth = strings.TrimPrefix(upth, `file://`)
+ first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/")
+ if strings.HasSuffix(first, ":") {
+ // drive letter in the first segment:
+ // file:///c:/folder/... ==> strip the leading slash
+ upth = strings.TrimPrefix(upth, `/`)
}
}
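
`LoadStrategy` selects the remote loader purely on the `http` prefix and hands everything else to the local loader after percent-unescaping and the `file://` handling documented above. A small usage sketch, assuming the `github.com/go-openapi/swag` import path; the spec path is hypothetical:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// A stand-in remote loader; swag's own HTTP loader could be used instead.
	remote := func(uri string) ([]byte, error) {
		return nil, fmt.Errorf("remote loader called for %s", uri)
	}

	// Not an "http..." path, so the local branch is chosen; on unix the
	// returned loader strips "file://" and calls os.ReadFile on /tmp/spec.yaml.
	load := swag.LoadStrategy("file:///tmp/spec.yaml", os.ReadFile, remote)
	doc, err := load("file:///tmp/spec.yaml")
	fmt.Println(len(doc), err)
}
```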
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
index aa7f6a9bb..8bb64ac32 100644
--- a/vendor/github.com/go-openapi/swag/name_lexem.go
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -14,74 +14,80 @@
package swag
-import "unicode"
+import (
+ "unicode"
+ "unicode/utf8"
+)
type (
- nameLexem interface {
- GetUnsafeGoName() string
- GetOriginal() string
- IsInitialism() bool
- }
+ lexemKind uint8
- initialismNameLexem struct {
+ nameLexem struct {
original string
matchedInitialism string
+ kind lexemKind
}
+)
- casualNameLexem struct {
- original string
- }
+const (
+ lexemKindCasualName lexemKind = iota
+ lexemKindInitialismName
)
-func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
- return &initialismNameLexem{
+func newInitialismNameLexem(original, matchedInitialism string) nameLexem {
+ return nameLexem{
+ kind: lexemKindInitialismName,
original: original,
matchedInitialism: matchedInitialism,
}
}
-func newCasualNameLexem(original string) *casualNameLexem {
- return &casualNameLexem{
+func newCasualNameLexem(original string) nameLexem {
+ return nameLexem{
+ kind: lexemKindCasualName,
original: original,
}
}
-func (l *initialismNameLexem) GetUnsafeGoName() string {
- return l.matchedInitialism
-}
+func (l nameLexem) GetUnsafeGoName() string {
+ if l.kind == lexemKindInitialismName {
+ return l.matchedInitialism
+ }
+
+ var (
+ first rune
+ rest string
+ )
-func (l *casualNameLexem) GetUnsafeGoName() string {
- var first rune
- var rest string
for i, orig := range l.original {
if i == 0 {
first = orig
continue
}
+
if i > 0 {
rest = l.original[i:]
break
}
}
+
if len(l.original) > 1 {
- return string(unicode.ToUpper(first)) + lower(rest)
+ b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(b)
+ }()
+ b.WriteRune(unicode.ToUpper(first))
+ b.WriteString(lower(rest))
+ return b.String()
}
return l.original
}
-func (l *initialismNameLexem) GetOriginal() string {
+func (l nameLexem) GetOriginal() string {
return l.original
}
-func (l *casualNameLexem) GetOriginal() string {
- return l.original
-}
-
-func (l *initialismNameLexem) IsInitialism() bool {
- return true
-}
-
-func (l *casualNameLexem) IsInitialism() bool {
- return false
+func (l nameLexem) IsInitialism() bool {
+ return l.kind == lexemKindInitialismName
}
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go
deleted file mode 100644
index f5228b82c..000000000
--- a/vendor/github.com/go-openapi/swag/post_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.8
-// +build go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.PathUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
deleted file mode 100644
index 7c7da9c08..000000000
--- a/vendor/github.com/go-openapi/swag/post_go19.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.9
-// +build go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Since go1.9, this may be implemented with sync.Map.
-type indexOfInitialisms struct {
- sortMutex *sync.Mutex
- index *sync.Map
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- sortMutex: new(sync.Mutex),
- index: new(sync.Map),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- for k, v := range initial {
- m.index.Store(k, v)
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- _, ok := m.index.Load(key)
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.index.Store(key, true)
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.sortMutex.Lock()
- defer m.sortMutex.Unlock()
- m.index.Range(func(key, value interface{}) bool {
- k := key.(string)
- result = append(result, k)
- return true
- })
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go
deleted file mode 100644
index 2757d9b95..000000000
--- a/vendor/github.com/go-openapi/swag/pre_go18.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.8
-// +build !go1.8
-
-package swag
-
-import "net/url"
-
-func pathUnescape(path string) (string, error) {
- return url.QueryUnescape(path)
-}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
deleted file mode 100644
index 0565db377..000000000
--- a/vendor/github.com/go-openapi/swag/pre_go19.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.9
-// +build !go1.9
-
-package swag
-
-import (
- "sort"
- "sync"
-)
-
-// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
-// Before go1.9, this may be implemented with a mutex on the map.
-type indexOfInitialisms struct {
- getMutex *sync.Mutex
- index map[string]bool
-}
-
-func newIndexOfInitialisms() *indexOfInitialisms {
- return &indexOfInitialisms{
- getMutex: new(sync.Mutex),
- index: make(map[string]bool, 50),
- }
-}
-
-func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k, v := range initial {
- m.index[k] = v
- }
- return m
-}
-
-func (m *indexOfInitialisms) isInitialism(key string) bool {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- _, ok := m.index[key]
- return ok
-}
-
-func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- m.index[key] = true
- return m
-}
-
-func (m *indexOfInitialisms) sorted() (result []string) {
- m.getMutex.Lock()
- defer m.getMutex.Unlock()
- for k := range m.index {
- result = append(result, k)
- }
- sort.Sort(sort.Reverse(byInitialism(result)))
- return
-}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
index a1825fb7d..274727a86 100644
--- a/vendor/github.com/go-openapi/swag/split.go
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -15,124 +15,269 @@
package swag
import (
+ "bytes"
+ "sync"
"unicode"
+ "unicode/utf8"
)
-var nameReplaceTable = map[rune]string{
- '@': "At ",
- '&': "And ",
- '|': "Pipe ",
- '$': "Dollar ",
- '!': "Bang ",
- '-': "",
- '_': "",
-}
-
type (
splitter struct {
- postSplitInitialismCheck bool
initialisms []string
+ initialismsRunes [][]rune
+ initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version
+ postSplitInitialismCheck bool
+ }
+
+ splitterOption func(*splitter)
+
+ initialismMatch struct {
+ body []rune
+ start, end int
+ complete bool
+ }
+ initialismMatches []initialismMatch
+)
+
+type (
+ // memory pools of temporary objects.
+ //
+ // These are used to recycle temporarily allocated objects
+ // and relieve the GC from undue pressure.
+
+ matchesPool struct {
+ *sync.Pool
}
- splitterOption func(*splitter) *splitter
+ buffersPool struct {
+ *sync.Pool
+ }
+
+ lexemsPool struct {
+ *sync.Pool
+ }
+
+ splittersPool struct {
+ *sync.Pool
+ }
)
-// split calls the splitter; splitter provides more control and post options
+var (
+ // poolOfMatches holds temporary slices for recycling during the initialism match process
+ poolOfMatches = matchesPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make(initialismMatches, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfBuffers = buffersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ return new(bytes.Buffer)
+ },
+ },
+ }
+
+ poolOfLexems = lexemsPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := make([]nameLexem, 0, maxAllocMatches)
+
+ return &s
+ },
+ },
+ }
+
+ poolOfSplitters = splittersPool{
+ Pool: &sync.Pool{
+ New: func() any {
+ s := newSplitter()
+
+ return &s
+ },
+ },
+ }
+)
+
+// nameReplaceTable finds a word representation for special characters.
+func nameReplaceTable(r rune) (string, bool) {
+ switch r {
+ case '@':
+ return "At ", true
+ case '&':
+ return "And ", true
+ case '|':
+ return "Pipe ", true
+ case '$':
+ return "Dollar ", true
+ case '!':
+ return "Bang ", true
+ case '-':
+ return "", true
+ case '_':
+ return "", true
+ default:
+ return "", false
+ }
+}
+
+// split calls the splitter.
+//
+// Use newSplitter for more control and options
func split(str string) []string {
- lexems := newSplitter().split(str)
- result := make([]string, 0, len(lexems))
+ s := poolOfSplitters.BorrowSplitter()
+ lexems := s.split(str)
+ result := make([]string, 0, len(*lexems))
- for _, lexem := range lexems {
+ for _, lexem := range *lexems {
result = append(result, lexem.GetOriginal())
}
+ poolOfLexems.RedeemLexems(lexems)
+ poolOfSplitters.RedeemSplitter(s)
return result
}
-func (s *splitter) split(str string) []nameLexem {
- return s.toNameLexems(str)
-}
-
-func newSplitter(options ...splitterOption) *splitter {
- splitter := &splitter{
+func newSplitter(options ...splitterOption) splitter {
+ s := splitter{
postSplitInitialismCheck: false,
initialisms: initialisms,
+ initialismsRunes: initialismsRunes,
+ initialismsUpperCased: initialismsUpperCased,
}
for _, option := range options {
- splitter = option(splitter)
+ option(&s)
}
- return splitter
+ return s
}
// withPostSplitInitialismCheck allows to catch initialisms after main split process
-func withPostSplitInitialismCheck(s *splitter) *splitter {
+func withPostSplitInitialismCheck(s *splitter) {
s.postSplitInitialismCheck = true
+}
+
+func (p matchesPool) BorrowMatches() *initialismMatches {
+ s := p.Get().(*initialismMatches)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
return s
}
-type (
- initialismMatch struct {
- start, end int
- body []rune
- complete bool
+func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer {
+ s := p.Get().(*bytes.Buffer)
+ s.Reset()
+
+ if s.Cap() < size {
+ s.Grow(size)
}
- initialismMatches []*initialismMatch
-)
-func (s *splitter) toNameLexems(name string) []nameLexem {
+ return s
+}
+
+func (p lexemsPool) BorrowLexems() *[]nameLexem {
+ s := p.Get().(*[]nameLexem)
+ *s = (*s)[:0] // reset slice, keep allocated capacity
+
+ return s
+}
+
+func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter {
+ s := p.Get().(*splitter)
+ s.postSplitInitialismCheck = false // reset options
+ for _, apply := range options {
+ apply(s)
+ }
+
+ return s
+}
+
+func (p matchesPool) RedeemMatches(s *initialismMatches) {
+ p.Put(s)
+}
+
+func (p buffersPool) RedeemBuffer(s *bytes.Buffer) {
+ p.Put(s)
+}
+
+func (p lexemsPool) RedeemLexems(s *[]nameLexem) {
+ p.Put(s)
+}
+
+func (p splittersPool) RedeemSplitter(s *splitter) {
+ p.Put(s)
+}
+
+func (m initialismMatch) isZero() bool {
+ return m.start == 0 && m.end == 0
+}
+
+func (s splitter) split(name string) *[]nameLexem {
nameRunes := []rune(name)
matches := s.gatherInitialismMatches(nameRunes)
+ if matches == nil {
+ return poolOfLexems.BorrowLexems()
+ }
+
return s.mapMatchesToNameLexems(nameRunes, matches)
}
-func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
- matches := make(initialismMatches, 0)
+func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
+ var matches *initialismMatches
for currentRunePosition, currentRune := range nameRunes {
- newMatches := make(initialismMatches, 0, len(matches))
+ // recycle these allocations as we loop over runes
+ // with such recycling, only 2 slices should be allocated per call
+ // instead of o(n).
+ newMatches := poolOfMatches.BorrowMatches()
// check current initialism matches
- for _, match := range matches {
- if keepCompleteMatch := match.complete; keepCompleteMatch {
- newMatches = append(newMatches, match)
- continue
- }
+ if matches != nil { // skip first iteration
+ for _, match := range *matches {
+ if keepCompleteMatch := match.complete; keepCompleteMatch {
+ *newMatches = append(*newMatches, match)
+ continue
+ }
- // drop failed match
- currentMatchRune := match.body[currentRunePosition-match.start]
- if !s.initialismRuneEqual(currentMatchRune, currentRune) {
- continue
- }
+ // drop failed match
+ currentMatchRune := match.body[currentRunePosition-match.start]
+ if currentMatchRune != currentRune {
+ continue
+ }
- // try to complete ongoing match
- if currentRunePosition-match.start == len(match.body)-1 {
- // we are close; the next step is to check the symbol ahead
- // if it is a small letter, then it is not the end of match
- // but beginning of the next word
-
- if currentRunePosition < len(nameRunes)-1 {
- nextRune := nameRunes[currentRunePosition+1]
- if newWord := unicode.IsLower(nextRune); newWord {
- // oh ok, it was the start of a new word
- continue
+ // try to complete ongoing match
+ if currentRunePosition-match.start == len(match.body)-1 {
+ // we are close; the next step is to check the symbol ahead
+ // if it is a small letter, then it is not the end of match
+ // but beginning of the next word
+
+ if currentRunePosition < len(nameRunes)-1 {
+ nextRune := nameRunes[currentRunePosition+1]
+ if newWord := unicode.IsLower(nextRune); newWord {
+ // oh ok, it was the start of a new word
+ continue
+ }
}
+
+ match.complete = true
+ match.end = currentRunePosition
}
- match.complete = true
- match.end = currentRunePosition
+ *newMatches = append(*newMatches, match)
}
-
- newMatches = append(newMatches, match)
}
// check for new initialism matches
- for _, initialism := range s.initialisms {
- initialismRunes := []rune(initialism)
- if s.initialismRuneEqual(initialismRunes[0], currentRune) {
- newMatches = append(newMatches, &initialismMatch{
+ for i := range s.initialisms {
+ initialismRunes := s.initialismsRunes[i]
+ if initialismRunes[0] == currentRune {
+ *newMatches = append(*newMatches, initialismMatch{
start: currentRunePosition,
body: initialismRunes,
complete: false,
@@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
}
}
+ if matches != nil {
+ poolOfMatches.RedeemMatches(matches)
+ }
matches = newMatches
}
+ // up to the caller to redeem this last slice
return matches
}
-func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
- nameLexems := make([]nameLexem, 0)
+func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
+ nameLexems := poolOfLexems.BorrowLexems()
- var lastAcceptedMatch *initialismMatch
- for _, match := range matches {
+ var lastAcceptedMatch initialismMatch
+ for _, match := range *matches {
if !match.complete {
continue
}
- if firstMatch := lastAcceptedMatch == nil; firstMatch {
- nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ if firstMatch := lastAcceptedMatch.isZero(); firstMatch {
+ s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start])
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
@@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa
}
middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
- nameLexems = append(nameLexems, s.breakCasualString(middle)...)
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+ s.appendBrokenDownCasualString(nameLexems, middle)
+ *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body)))
lastAcceptedMatch = match
}
// we have not found any accepted matches
- if lastAcceptedMatch == nil {
- return s.breakCasualString(nameRunes)
- }
-
- if lastAcceptedMatch.end+1 != len(nameRunes) {
+ if lastAcceptedMatch.isZero() {
+ *nameLexems = (*nameLexems)[:0]
+ s.appendBrokenDownCasualString(nameLexems, nameRunes)
+ } else if lastAcceptedMatch.end+1 != len(nameRunes) {
rest := nameRunes[lastAcceptedMatch.end+1:]
- nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+ s.appendBrokenDownCasualString(nameLexems, rest)
}
- return nameLexems
-}
+ poolOfMatches.RedeemMatches(matches)
-func (s *splitter) initialismRuneEqual(a, b rune) bool {
- return a == b
+ return nameLexems
}
-func (s *splitter) breakInitialism(original string) nameLexem {
+func (s splitter) breakInitialism(original string) nameLexem {
return newInitialismNameLexem(original, original)
}
-func (s *splitter) breakCasualString(str []rune) []nameLexem {
- segments := make([]nameLexem, 0)
- currentSegment := ""
+func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) {
+ currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can be reused
+ defer func() {
+ poolOfBuffers.RedeemBuffer(currentSegment)
+ }()
addCasualNameLexem := func(original string) {
- segments = append(segments, newCasualNameLexem(original))
+ *segments = append(*segments, newCasualNameLexem(original))
}
addInitialismNameLexem := func(original, match string) {
- segments = append(segments, newInitialismNameLexem(original, match))
+ *segments = append(*segments, newInitialismNameLexem(original, match))
}
- addNameLexem := func(original string) {
- if s.postSplitInitialismCheck {
- for _, initialism := range s.initialisms {
- if upper(initialism) == upper(original) {
- addInitialismNameLexem(original, initialism)
+ var addNameLexem func(string)
+ if s.postSplitInitialismCheck {
+ addNameLexem = func(original string) {
+ for i := range s.initialisms {
+ if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) {
+ addInitialismNameLexem(original, s.initialisms[i])
+
return
}
}
- }
- addCasualNameLexem(original)
+ addCasualNameLexem(original)
+ }
+ } else {
+ addNameLexem = addCasualNameLexem
}
- for _, rn := range string(str) {
- if replace, found := nameReplaceTable[rn]; found {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ for _, rn := range str {
+ if replace, found := nameReplaceTable(rn); found {
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
if replace != "" {
@@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem {
}
if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
- currentSegment = ""
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
+ currentSegment.Reset()
}
continue
}
if unicode.IsUpper(rn) {
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
- currentSegment = ""
+ currentSegment.Reset()
}
- currentSegment += string(rn)
+ currentSegment.WriteRune(rn)
+ }
+
+ if currentSegment.Len() > 0 {
+ addNameLexem(currentSegment.String())
}
+}
+
+// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but
+// it ignores leading and trailing blank spaces in the compared
+// string.
+//
+// base is assumed to be composed of upper-cased runes, and be already
+// trimmed.
+//
+// This code is heavily inspired from strings.EqualFold.
+func isEqualFoldIgnoreSpace(base []rune, str string) bool {
+ var i, baseIndex int
+ // equivalent to b := []byte(str), but without data copy
+ b := hackStringBytes(str)
+
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ break
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ break
+ }
+ i += size
+ }
+
+ if i >= len(b) {
+ return len(base) == 0
+ }
+
+ for _, baseRune := range base {
+ if i >= len(b) {
+ break
+ }
+
+ if c := b[i]; c < utf8.RuneSelf {
+ // single byte rune case (ASCII)
+ if baseRune >= utf8.RuneSelf {
+ return false
+ }
+
+ baseChar := byte(baseRune)
+ if c != baseChar &&
+ !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) {
+ return false
+ }
+
+ baseIndex++
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if unicode.ToUpper(r) != baseRune {
+ return false
+ }
+ baseIndex++
+ i += size
+ }
+
+ if baseIndex != len(base) {
+ return false
+ }
+
+ // all passed: now we should only have blanks
+ for i < len(b) {
+ if c := b[i]; c < utf8.RuneSelf {
+ // fast path for ASCII
+ if c != ' ' && c != '\t' {
+ return false
+ }
+ i++
+
+ continue
+ }
+
+ // unicode case
+ r, size := utf8.DecodeRune(b[i:])
+ if !unicode.IsSpace(r) {
+ return false
+ }
- if currentSegment != "" {
- addNameLexem(currentSegment)
+ i += size
}
- return segments
+ return true
}
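
The splitter now recycles its buffers, lexem slices, matches, and splitter instances through `sync.Pool`, resetting each object on borrow so stale state never leaks between calls. A minimal standalone sketch of that borrow/reset/redeem pattern:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

// borrowBuffer hands out a reset buffer with at least `size` capacity.
func borrowBuffer(size int) *bytes.Buffer {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset() // drop old content, keep the allocated capacity
	if b.Cap() < size {
		b.Grow(size)
	}
	return b
}

// redeemBuffer returns the buffer to the pool for reuse.
func redeemBuffer(b *bytes.Buffer) { bufPool.Put(b) }

func main() {
	b := borrowBuffer(16)
	defer redeemBuffer(b)
	b.WriteString("pooled")
	fmt.Println(b.String())
}
```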
diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go
new file mode 100644
index 000000000..90745d5ca
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/string_bytes.go
@@ -0,0 +1,8 @@
+package swag
+
+import "unsafe"
+
+// hackStringBytes returns the (unsafe) underlying bytes slice of a string.
+func hackStringBytes(str string) []byte {
+ return unsafe.Slice(unsafe.StringData(str), len(str))
+}
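
`hackStringBytes` uses `unsafe.StringData` (Go 1.20+) to view a string's bytes without copying; the slice aliases the string's storage and must be treated as read-only. A standalone sketch of the same zero-copy conversion:

```go
package main

import (
	"fmt"
	"unsafe"
)

// stringBytes returns a read-only byte view of s without allocating.
// Writing through the returned slice is undefined behavior.
func stringBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := stringBytes("HTTPServer")
	fmt.Println(len(b), b[0]) // 10 72 ('H'), no copy made
}
```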
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
index d971fbe34..5051401c4 100644
--- a/vendor/github.com/go-openapi/swag/util.go
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -18,76 +18,25 @@ import (
"reflect"
"strings"
"unicode"
+ "unicode/utf8"
)
-// commonInitialisms are common acronyms that are kept as whole uppercased words.
-var commonInitialisms *indexOfInitialisms
-
-// initialisms is a slice of sorted initialisms
-var initialisms []string
-
-var isInitialism func(string) bool
-
// GoNamePrefixFunc sets an optional rule to prefix go names
// which do not start with a letter.
//
+// The prefix function is assumed to return a string that starts with an upper case letter.
+//
// e.g. to help convert "123" into "{prefix}123"
//
// The default is to prefix with "X"
var GoNamePrefixFunc func(string) string
-func init() {
- // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
- var configuredInitialisms = map[string]bool{
- "ACL": true,
- "API": true,
- "ASCII": true,
- "CPU": true,
- "CSS": true,
- "DNS": true,
- "EOF": true,
- "GUID": true,
- "HTML": true,
- "HTTPS": true,
- "HTTP": true,
- "ID": true,
- "IP": true,
- "IPv4": true,
- "IPv6": true,
- "JSON": true,
- "LHS": true,
- "OAI": true,
- "QPS": true,
- "RAM": true,
- "RHS": true,
- "RPC": true,
- "SLA": true,
- "SMTP": true,
- "SQL": true,
- "SSH": true,
- "TCP": true,
- "TLS": true,
- "TTL": true,
- "UDP": true,
- "UI": true,
- "UID": true,
- "UUID": true,
- "URI": true,
- "URL": true,
- "UTF8": true,
- "VM": true,
- "XML": true,
- "XMPP": true,
- "XSRF": true,
- "XSS": true,
+func prefixFunc(name, in string) string {
+ if GoNamePrefixFunc == nil {
+ return "X" + in
}
- // a thread-safe index of initialisms
- commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
- initialisms = commonInitialisms.sorted()
-
- // a test function
- isInitialism = commonInitialisms.isInitialism
+ return GoNamePrefixFunc(name) + in
}
const (
@@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string {
return result
}
-type byInitialism []string
-
-func (s byInitialism) Len() int {
- return len(s)
-}
-func (s byInitialism) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-func (s byInitialism) Less(i, j int) bool {
- if len(s[i]) != len(s[j]) {
- return len(s[i]) < len(s[j])
- }
-
- return strings.Compare(s[i], s[j]) > 0
-}
-
// Removes leading whitespaces
func trim(str string) string {
- return strings.Trim(str, " ")
+ return strings.TrimSpace(str)
}
// Shortcut to strings.ToUpper()
@@ -188,15 +121,20 @@ func lower(str string) string {
}
// Camelize an uppercased word
-func Camelize(word string) (camelized string) {
+func Camelize(word string) string {
+ camelized := poolOfBuffers.BorrowBuffer(len(word))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(camelized)
+ }()
+
for pos, ru := range []rune(word) {
if pos > 0 {
- camelized += string(unicode.ToLower(ru))
+ camelized.WriteRune(unicode.ToLower(ru))
} else {
- camelized += string(unicode.ToUpper(ru))
+ camelized.WriteRune(unicode.ToUpper(ru))
}
}
- return
+ return camelized.String()
}
// ToFileName lowercases and underscores a go type name
@@ -224,33 +162,40 @@ func ToCommandName(name string) string {
// ToHumanNameLower represents a code name as a human series of words
func ToHumanNameLower(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
- out := make([]string, 0, len(in))
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ out := make([]string, 0, len(*in))
- for _, w := range in {
+ for _, w := range *in {
if !w.IsInitialism() {
out = append(out, lower(w.GetOriginal()))
} else {
- out = append(out, w.GetOriginal())
+ out = append(out, trim(w.GetOriginal()))
}
}
+ poolOfLexems.RedeemLexems(in)
return strings.Join(out, " ")
}
// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
func ToHumanNameTitle(name string) string {
- in := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ in := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
- out := make([]string, 0, len(in))
- for _, w := range in {
- original := w.GetOriginal()
+ out := make([]string, 0, len(*in))
+ for _, w := range *in {
+ original := trim(w.GetOriginal())
if !w.IsInitialism() {
out = append(out, Camelize(original))
} else {
out = append(out, original)
}
}
+ poolOfLexems.RedeemLexems(in)
+
return strings.Join(out, " ")
}
@@ -264,7 +209,7 @@ func ToJSONName(name string) string {
out = append(out, lower(w))
continue
}
- out = append(out, Camelize(w))
+ out = append(out, Camelize(trim(w)))
}
return strings.Join(out, "")
}
@@ -283,35 +228,70 @@ func ToVarName(name string) string {
// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
func ToGoName(name string) string {
- lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+ s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck)
+ lexems := s.split(name)
+ poolOfSplitters.RedeemSplitter(s)
+ defer func() {
+ poolOfLexems.RedeemLexems(lexems)
+ }()
+ lexemes := *lexems
+
+ if len(lexemes) == 0 {
+ return ""
+ }
+
+ result := poolOfBuffers.BorrowBuffer(len(name))
+ defer func() {
+ poolOfBuffers.RedeemBuffer(result)
+ }()
+
+ // check if not starting with a letter, upper case
+ firstPart := lexemes[0].GetUnsafeGoName()
+ if lexemes[0].IsInitialism() {
+ firstPart = upper(firstPart)
+ }
+
+ if c := firstPart[0]; c < utf8.RuneSelf {
+ // ASCII
+ switch {
+ case 'A' <= c && c <= 'Z':
+ result.WriteString(firstPart)
+ case 'a' <= c && c <= 'z':
+ result.WriteByte(c - 'a' + 'A')
+ result.WriteString(firstPart[1:])
+ default:
+ result.WriteString(prefixFunc(name, firstPart))
+ // NOTE: no longer check if prefixFunc returns a string that starts with uppercase:
+ // assume this is always the case
+ }
+ } else {
+ // unicode
+ firstRune, _ := utf8.DecodeRuneInString(firstPart)
+ switch {
+ case !unicode.IsLetter(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ case !unicode.IsUpper(firstRune):
+ result.WriteString(prefixFunc(name, firstPart))
+ /*
+ result.WriteRune(unicode.ToUpper(firstRune))
+ result.WriteString(firstPart[offset:])
+ */
+ default:
+ result.WriteString(firstPart)
+ }
+ }
- result := ""
- for _, lexem := range lexems {
+ for _, lexem := range lexemes[1:] {
goName := lexem.GetUnsafeGoName()
// to support old behavior
if lexem.IsInitialism() {
goName = upper(goName)
}
- result += goName
+ result.WriteString(goName)
}
- if len(result) > 0 {
- // Only prefix with X when the first character isn't an ascii letter
- first := []rune(result)[0]
- if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
- if GoNamePrefixFunc == nil {
- return "X" + result
- }
- result = GoNamePrefixFunc(name) + result
- }
- first = []rune(result)[0]
- if unicode.IsLetter(first) && !unicode.IsUpper(first) {
- result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
- }
- }
-
- return result
+ return result.String()
}
// ContainsStrings searches a slice of strings for a case-sensitive match
@@ -343,7 +323,7 @@ type zeroable interface {
func IsZero(data interface{}) bool {
v := reflect.ValueOf(data)
// check for nil data
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
if v.IsNil() {
return true
@@ -356,7 +336,7 @@ func IsZero(data interface{}) bool {
}
// continue with slightly more complex reflection
- switch v.Kind() {
+ switch v.Kind() { //nolint:exhaustive
case reflect.String:
return v.Len() == 0
case reflect.Bool:
@@ -376,16 +356,6 @@ func IsZero(data interface{}) bool {
}
}
-// AddInitialisms add additional initialisms
-func AddInitialisms(words ...string) {
- for _, word := range words {
- // commonInitialisms[upper(word)] = true
- commonInitialisms.add(upper(word))
- }
- // sort again
- initialisms = commonInitialisms.sorted()
-}
-
// CommandLineOptionsGroup represents a group of user-defined command line options
type CommandLineOptionsGroup struct {
ShortDescription string
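
The rewritten `ToGoName` keeps its external behavior (initialisms uppercased, non-letter prefixes handled via `GoNamePrefixFunc`) while building the result in a pooled buffer. A small usage sketch of the public helpers, assuming the `github.com/go-openapi/swag` import path; the outputs in the comments are the expected results under the default initialism set:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("find_thing_by_id"))      // e.g. FindThingByID
	fmt.Println(swag.ToFileName("FindThingByID"))       // e.g. find_thing_by_id
	fmt.Println(swag.ToHumanNameLower("FindThingByID")) // e.g. find thing by ID
	fmt.Println(swag.ToJSONName("find thing by id"))    // e.g. findThingById
}
```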
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
index f09ee609f..f59e02593 100644
--- a/vendor/github.com/go-openapi/swag/yaml.go
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -16,8 +16,11 @@ package swag
import (
"encoding/json"
+ "errors"
"fmt"
"path/filepath"
+ "reflect"
+ "sort"
"strconv"
"github.com/mailru/easyjson/jlexer"
@@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) {
return nil, err
}
if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode {
- return nil, fmt.Errorf("only YAML documents that are objects are supported")
+ return nil, errors.New("only YAML documents that are objects are supported")
}
return &document, nil
}
@@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) {
case yamlTimestamp:
return node.Value, nil
case yamlNull:
- return nil, nil
+ return nil, nil //nolint:nilnil
default:
return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag())
}
@@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) {
return yaml.Marshal(&n)
}
+func isNil(input interface{}) bool {
+ if input == nil {
+ return true
+ }
+ kind := reflect.TypeOf(input).Kind()
+ switch kind { //nolint:exhaustive
+ case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan:
+ return reflect.ValueOf(input).IsNil()
+ default:
+ return false
+ }
+}
+
func json2yaml(item interface{}) (*yaml.Node, error) {
+ if isNil(item) {
+ return &yaml.Node{
+ Kind: yaml.ScalarNode,
+ Value: "null",
+ }, nil
+ }
+
switch val := item.(type) {
case JSONMapSlice:
var n yaml.Node
@@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
case map[string]interface{}:
var n yaml.Node
n.Kind = yaml.MappingNode
- for k, v := range val {
+ keys := make([]string, 0, len(val))
+ for k := range val {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := val[k]
childNode, err := json2yaml(v)
if err != nil {
return nil, err
@@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) {
Tag: yamlBoolScalar,
Value: strconv.FormatBool(val),
}, nil
+ default:
+ return nil, fmt.Errorf("unhandled type: %T", val)
}
- return nil, nil
}
// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
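
`json2yaml` now sorts map keys before emitting child nodes, so the YAML produced from a `map[string]interface{}` is deterministic across runs. A minimal standalone sketch of that ordering step:

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	val := map[string]any{"b": 2, "a": 1, "c": 3}

	// Collect and sort the keys, then emit values in that stable order.
	keys := make([]string, 0, len(val))
	for k := range val {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s: %v\n", k, val[k]) // always a, b, c
	}
}
```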
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
index 750c3c7eb..e809c79ab 100644
--- a/vendor/github.com/huandu/xstrings/README.md
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -39,8 +39,8 @@ _Keep this table sorted by Function in ascending order._
| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) |
| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) |
| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) |
-| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
-| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) |
| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) |
| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) |
@@ -50,14 +50,15 @@ _Keep this table sorted by Function in ascending order._
| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) |
| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) |
| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) |
-| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
-| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) |
| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) |
| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) |
| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) |
| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) |
+| [ToPascalCase](https://godoc.org/github.com/huandu/xstrings#ToPascalCase) | - | [#1](https://github.com/huandu/xstrings/issues/1) |
| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) |
| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) |
diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go
index cba0d0725..5d8cfee47 100644
--- a/vendor/github.com/huandu/xstrings/convert.go
+++ b/vendor/github.com/huandu/xstrings/convert.go
@@ -13,17 +13,37 @@ import (
//
// Some samples.
//
+// "some_words" => "someWords"
+// "http_server" => "httpServer"
+// "no_https" => "noHttps"
+// "_complex__case_" => "_complex_Case_"
+// "some words" => "someWords"
+// "GOLANG_IS_GREAT" => "golangIsGreat"
+func ToCamelCase(str string) string {
+ return toCamelCase(str, false)
+}
+
+// ToPascalCase is to convert words separated by space, underscore and hyphen to pascal case.
+//
+// Some samples.
+//
// "some_words" => "SomeWords"
// "http_server" => "HttpServer"
// "no_https" => "NoHttps"
// "_complex__case_" => "_Complex_Case_"
// "some words" => "SomeWords"
-func ToCamelCase(str string) string {
+// "GOLANG_IS_GREAT" => "GolangIsGreat"
+func ToPascalCase(str string) string {
+ return toCamelCase(str, true)
+}
+
+func toCamelCase(str string, isBig bool) string {
if len(str) == 0 {
return ""
}
buf := &stringBuilder{}
+ var isFirstRuneUpper bool
var r0, r1 rune
var size int
@@ -33,7 +53,14 @@ func ToCamelCase(str string) string {
str = str[size:]
if !isConnector(r0) {
- r0 = unicode.ToUpper(r0)
+ isFirstRuneUpper = unicode.IsUpper(r0)
+
+ if isBig {
+ r0 = unicode.ToUpper(r0)
+ } else {
+ r0 = unicode.ToLower(r0)
+ }
+
break
}
@@ -60,12 +87,25 @@ func ToCamelCase(str string) string {
}
if isConnector(r1) {
+ isFirstRuneUpper = unicode.IsUpper(r0)
r0 = unicode.ToUpper(r0)
} else {
+ if isFirstRuneUpper {
+ if unicode.IsUpper(r0) {
+ r0 = unicode.ToLower(r0)
+ } else {
+ isFirstRuneUpper = false
+ }
+ }
+
buf.WriteRune(r1)
}
}
+ if isFirstRuneUpper && !isBig {
+ r0 = unicode.ToLower(r0)
+ }
+
buf.WriteRune(r0)
return buf.String()
}
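
The xstrings bump above adds `ToPascalCase` and changes `ToCamelCase` to lower-case the first word (and to fold all-caps input). A quick sketch using the sample values from the doc comments in this hunk; expected outputs are taken from those comments:

```go
// Outputs taken from the doc comments added in the hunk above.
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	fmt.Println(xstrings.ToCamelCase("http_server"))     // httpServer
	fmt.Println(xstrings.ToCamelCase("GOLANG_IS_GREAT")) // golangIsGreat
	fmt.Println(xstrings.ToPascalCase("http_server"))    // HttpServer
	fmt.Println(xstrings.ToPascalCase("some words"))     // SomeWords
}
```
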
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 4c28dff46..a22953805 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -3,7 +3,6 @@
before:
hooks:
- ./gen.sh
- - go install mvdan.cc/garble@v0.10.1
builds:
-
@@ -32,7 +31,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2d"
binary: s2d
@@ -59,7 +57,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -87,7 +84,6 @@ builds:
- mips64le
goarm:
- 7
- gobinary: garble
archives:
-
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 43de48677..05c7359e4 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,38 @@ This package provides various compression algorithms.
# changelog
+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
+ * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
+ * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
+
+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
+ * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
+ * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
+ * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
+ * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
+ * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
+
+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
+ * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
+ * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
+ * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
+ * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
+ * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
+
+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
+ * fse: Fix max header size https://github.com/klauspost/compress/pull/881
+ * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
+ * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
+
+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+ * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+ * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+ * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+ * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
@@ -23,6 +55,10 @@ This package provides various compression algorithms.
* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+
+ See changes to v1.16.x
+
* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
@@ -61,6 +97,7 @@ This package provides various compression algorithms.
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
See changes to v1.15.x
@@ -528,6 +565,8 @@ the stateless compress described below.
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
@@ -546,7 +585,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
-```
+```go
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
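
The README hunk above fences the stateless-compression snippet as Go; as the surrounding text notes, a `bufio.Writer` controls how much data each stateless block sees. A hedged sketch of that pattern (function name and 4KB buffer size chosen here for illustration):

```go
// Sketch of the buffered stateless-gzip pattern described in the README text
// above: a 4KB bufio.Writer in front of a gzip.StatelessCompression writer.
package main

import (
	"bufio"
	"io"
	"strings"

	"github.com/klauspost/compress/gzip"
)

func compressStateless(dst io.Writer, src io.Reader) error {
	gzw, err := gzip.NewWriterLevel(dst, gzip.StatelessCompression)
	if err != nil {
		return err
	}
	// Buffer writes so each ~4KB chunk is compressed as one stateless block.
	bw := bufio.NewWriterSize(gzw, 4096)
	if _, err := io.Copy(bw, src); err != nil {
		return err
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	return gzw.Close()
}

func main() {
	if err := compressStateless(io.Discard, strings.NewReader("example payload")); err != nil {
		panic(err)
	}
}
```
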
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index de912e187..66d1657d2 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
// Do not fill window if we are in store-only or huffman mode.
- if d.level <= 0 {
+ if d.level <= 0 && d.level > -MinCustomWindowSize {
return
}
if d.fast != nil {
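
The `fillWindow` change above keeps history for the negative levels used by custom-window encoders (only store/huffman levels skip it now), which is what the v1.17.5 "reset with dictionary on custom window encodes" fix refers to. A sketch only, assuming `NewWriterWindow` and `MinCustomWindowSize` are the exported custom-window API in this version of compress/flate:

```go
// Assumed API: flate.NewWriterWindow / flate.MinCustomWindowSize (a history
// window smaller than the 32KB default). Verify against the vendored sources.
package main

import (
	"bytes"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w, err := flate.NewWriterWindow(&buf, flate.MinCustomWindowSize)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello custom window")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
}
```
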
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
index 9a7655c0f..0782b86e3 100644
--- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 65d777357..074018d8f 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error {
previous0 bool
charnum uint16
- maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+ maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
// Write Table Size
bitStream = uint32(tableLog - minTablelog)
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d23..000000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
- b []byte
- off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
- v3 := int32(b.b[b.off+3])
- v2 := int32(b.b[b.off+2])
- v1 := int32(b.b[b.off+1])
- v0 := int32(b.b[b.off])
- return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
- v3 := uint32(b.b[b.off+3])
- v2 := uint32(b.b[b.off+2])
- v1 := uint32(b.b[b.off+1])
- v0 := uint32(b.b[b.off])
- return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
- return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 518436cf3..84aa3d12f 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -350,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
// Does not update s.clearCount.
func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
reuse = true
+ _ = s.count // Assert that s != nil to speed up the following loop.
for _, v := range in {
s.count[v]++
}
@@ -415,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool {
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
- minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+ minBitsSrc := highBit32(uint32(s.srcLen)) + 1
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
if minBitsSrc < minBitsSymbols {
return uint8(minBitsSrc)
@@ -427,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 {
func (s *Scratch) optimalTableLog() {
tableLog := s.TableLog
minBits := s.minTableLog()
- maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+ maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
if maxBitsSrc < tableLog {
// Accuracy can be reduced
tableLog = maxBitsSrc
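
The `countSimple` change above is a micro-optimization: reading `s.count` once before the loop lets the compiler prove the receiver is non-nil, so the implicit nil check inside the hot loop disappears. A standalone sketch of the pattern:

```go
// Standalone sketch of the nil-check-hoisting trick used above: touching the
// array field once before the loop asserts the receiver is non-nil, so no
// per-iteration nil check is needed.
package main

import "fmt"

type scratch struct {
	count [256]uint32
}

func (s *scratch) countSimple(in []byte) {
	_ = s.count // assert s != nil once, outside the hot loop
	for _, v := range in {
		s.count[v]++
	}
}

func main() {
	var s scratch
	s.countSimple([]byte("hello huff0"))
	fmt.Println(s.count['l']) // 2
}
```
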
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17ad0..77ecd68e0 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@ type Scratch struct {
// Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded.
MaxDecodedSize int
- br byteReader
+ srcLen int
// MaxSymbolValue will override the maximum symbol value of the next block.
MaxSymbolValue uint8
@@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
if s.fse == nil {
s.fse = &fse.Scratch{}
}
- s.br.init(in)
+ s.srcLen = len(in)
return s, nil
}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 2aa6a95a0..2754bac6f 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
- // length emitted down below is is a little lower (at 60 = 64 - 4), because
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853fc..5a4412f90 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,4 @@
module github.com/klauspost/compress
-go 1.16
+go 1.19
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index bdd49c8b2..92e2347bb 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
## Decompressor
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 9f17ce601..03744fbc7 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
if debugDecoder {
printf("Compression modes: 0b%b", compMode)
}
+ if compMode&3 != 0 {
+ return errors.New("corrupt block: reserved bits not zero")
+ }
for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 2cfe925ad..32a7f401d 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error {
return nil
}
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(length)
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, val)
+}
+
// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
if len(data) > maxSequences || len(data) < 2 {
@@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
if len(b.sequences) == 0 {
return b.encodeLits(b.literals, rawAllLits)
}
+ if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+ // Check common RLE cases.
+ seq := b.sequences[0]
+ if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+ // Offset == 1 and 0 or 1 literals.
+ b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+ return nil
+ }
+ }
+
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index f6a240970..6a5a2988b 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -95,42 +95,54 @@ type Header struct {
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
+ _, err := h.DecodeAndStrip(in)
+ return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
*h = Header{}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
if string(b) != frameMagic {
if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
- return ErrMagicMismatch
+ return nil, ErrMagicMismatch
}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
h.Skippable = true
h.SkippableID = int(b[0] & 0xf)
h.SkippableSize = binary.LittleEndian.Uint32(in)
- return nil
+ return in[4:], nil
}
// Read Window_Descriptor
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
fhd, in := in[0], in[1:]
h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
if fhd&(1<<3) != 0 {
- return errors.New("reserved bit set on frame header")
+ return nil, errors.New("reserved bit set on frame header")
}
if !h.SingleSegment {
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
@@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error {
size = 4
}
if len(in) < int(size) {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
h.HeaderSize += int(size)
@@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error {
if fcsSize > 0 {
h.HasFCS = true
if len(in) < fcsSize {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize)
@@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error {
// Frame Header done, we will not fail from now on.
if len(in) < 3 {
- return nil
+ return in, nil
}
tmp := in[:3]
bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error {
cSize := int(bh >> 3)
switch blockType {
case blockTypeReserved:
- return nil
+ return in, nil
case blockTypeRLE:
h.FirstBlock.Compressed = true
h.FirstBlock.DecompressedSize = cSize
@@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error {
}
h.FirstBlock.OK = true
- return nil
+ return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+ if h.Skippable {
+ magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+ magic[0] |= byte(h.SkippableID & 0xf)
+ dst = append(dst, magic[:]...)
+ f := h.SkippableSize
+ return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+ }
+ f := frameHeader{
+ ContentSize: h.FrameContentSize,
+ WindowSize: uint32(h.WindowSize),
+ SingleSegment: h.SingleSegment,
+ Checksum: h.HasCheckSum,
+ DictID: h.DictionaryID,
+ }
+ return f.appendTo(dst), nil
}
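
The hunk above turns `Decode` into a wrapper around the new `DecodeAndStrip`, which also returns the bytes that follow the frame header, and adds `AppendTo` for re-encoding a header. A hedged sketch using a hand-built skippable frame (magic `0x184D2A50`, 4-byte little-endian size, then the payload) so it is self-contained:

```go
// Hedged sketch of the new Header helpers added above. The frame is a
// hand-built skippable frame; real zstd frames are handled the same way.
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Skippable frame: magic 0x184D2A50 (little endian), 4-byte size, payload.
	frame := []byte{0x50, 0x2a, 0x4d, 0x18, 0x04, 0x00, 0x00, 0x00, 1, 2, 3, 4}

	var h zstd.Header
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	fmt.Println(h.Skippable, h.SkippableSize, len(remain)) // true 4 4

	// Re-encode the parsed header; no validation is performed on the fields.
	enc, err := h.AppendTo(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(enc)) // 8
}
```
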
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f04aaa21e..bbca17234 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -82,7 +82,7 @@ var (
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
initPredefined()
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index 8d5567fe6..b7b83164b 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
enc.Encode(&block, b)
addValues(&remain, block.literals)
litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
seqs += len(block.sequences)
block.genCodes()
addHist(&ll, block.coders.llEnc.Histogram())
@@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if offset == 0 {
continue
}
+ if int(offset) >= len(o.History) {
+ continue
+ }
if offset > 3 {
newOffsets[offset-3]++
} else {
@@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
if seqs/nUsed < 512 {
// Use 512 as minimum.
nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
}
copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
hist := dst.Histogram()
@@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
fakeLength += v
hist[i] = uint32(v)
}
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
dst.HistogramFinished(maxSym, maxCount)
dst.reUsed = false
dst.useRLE = false
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 858f8f43a..4613724e9 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
- ofc = ofCode(uint32(m.rep))
+ ofc = ofCode(uint32(m.rep) & 3)
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
break
}
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -201,14 +213,6 @@ encodeLoop:
if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
return
}
- if debugAsserts {
- if offset >= s {
- panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
- }
- if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
- panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
- }
- }
// Try to quick reject if we already have a long match.
if m.length > 16 {
left := len(src) - int(m.s+m.length)
@@ -227,8 +231,10 @@ encodeLoop:
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
- if rep < 0 {
+ if m.rep <= 0 {
// Extend candidate match backwards as far as possible.
+ // Do not extend repeats as we can assume they are optimal
+ // and offsets change if s == nextEmit.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
@@ -239,7 +245,14 @@ encodeLoop:
l++
}
}
-
+ if debugAsserts {
+ if offset >= s {
+ panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+ }
+ if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+ panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ }
+ }
cand := match{offset: offset, s: s, length: l, rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
@@ -282,6 +295,7 @@ encodeLoop:
// Load next and check...
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+ index0 := s + 1
// Look far ahead, unless we have a really long match already...
if best.length < goodEnough {
@@ -335,41 +349,45 @@ encodeLoop:
}
if debugAsserts {
+ if best.offset >= best.s {
+ panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+ }
+ if best.s < nextEmit {
+ panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+ }
+ if best.offset < s-e.maxMatchOff {
+ panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+ }
if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
}
}
// We have a match, we can store the forward value
+ s = best.s
if best.rep > 0 {
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
- if debugAsserts && s < nextEmit {
- panic("s < nextEmit")
- }
addLiterals(&seq, best.s)
// Repeat. If bit 4 is set, this is a non-lit repeat.
seq.offset = uint32(best.rep & 3)
if debugSequences {
- println("repeat sequence", seq, "next s:", s)
+ println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
}
blk.sequences = append(blk.sequences, seq)
// Index old s + 1 -> s - 1
- index0 := s + 1
s = best.s + best.length
-
nextEmit = s
- if s >= sLimit {
- if debugEncoder {
- println("repeat ended", s, best.length)
- }
- break encodeLoop
- }
+
// Index skipped...
+ end := s
+ if s > sLimit+4 {
+ end = sLimit + 4
+ }
off := index0 + e.cur
- for index0 < s {
+ for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -378,6 +396,7 @@ encodeLoop:
off++
index0++
}
+
switch best.rep {
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
@@ -386,13 +405,17 @@ encodeLoop:
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, best.length)
+ }
+ break encodeLoop
+ }
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
- index0 := s + 1
- s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@@ -419,19 +442,25 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
- if s >= sLimit {
- break encodeLoop
+
+ // Index old s + 1 -> s - 1 or sLimit
+ end := s
+ if s > sLimit-4 {
+ end = sLimit - 4
}
- // Index old s + 1 -> s - 1
- for index0 < s {
+ off := index0 + e.cur
+ for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
+ off++
+ }
+ if s >= sLimit {
+ break encodeLoop
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 8582f31a7..a4f5bf91f 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
e.cur = e.maxMatchOff
break
}
-
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -145,7 +156,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
- var matched int32
+ var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@@ -162,6 +173,7 @@ encodeLoop:
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+ index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -258,7 +270,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@@ -498,15 +509,15 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
- index0 := s - l + 1
+ off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
+ off += 2
}
cv = load6432(src, s)
@@ -672,7 +683,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
- var matched int32
+ var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@@ -691,6 +702,7 @@ encodeLoop:
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
e.markShortShardDirty(nextHashS)
+ index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@@ -726,7 +738,6 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- index0 := s + repOff
s += lenght + repOff
nextEmit = s
@@ -790,7 +801,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@@ -1024,18 +1034,18 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
- index0 := s - l + 1
+ off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
+ off += 2
}
cv = load6432(src, s)
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index faaf81921..20671dcb9 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption {
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level and max 8MB.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
@@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
- o.windowSize = 16 << 20
+ o.windowSize = 8 << 20
case SpeedBestCompression:
- o.windowSize = 32 << 20
+ o.windowSize = 8 << 20
}
}
if !o.customALEntropy {
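
With the change above, `SpeedBetterCompression` and `SpeedBestCompression` now default to an 8MB window instead of 16MB/32MB; callers who want the old behavior can opt back in explicitly. A short sketch:

```go
// Sketch: explicitly restore a 32MB window now that better/best default to
// 8MB. The value must be a power of two within Min/MaxWindowSize.
package main

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(io.Discard,
		zstd.WithEncoderLevel(zstd.SpeedBestCompression),
		zstd.WithWindowSize(32<<20), // opt back in to the pre-v1.17.5 default
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	enc.Write([]byte("payload"))
}
```
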
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 2f5d5ed45..667ca0679 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte {
if f.SingleSegment {
dst = append(dst, uint8(f.ContentSize))
}
- // Unless SingleSegment is set, framessizes < 256 are nto stored.
+ // Unless SingleSegment is set, framessizes < 256 are not stored.
case 1:
f.ContentSize -= 256
dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51fe4..8adfebb02 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error {
if v == -1 {
s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
+ v = 1
}
+ symbolNext[i] = uint16(v)
}
}
@@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error {
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
+ for {
// lowprob area
position = (position + step) & tableMask
+ if position <= highThreshold {
+ break
+ }
}
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 17901e080..ae7d4d329 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -162,12 +162,12 @@ finalize:
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
+// func writeBlocks(s *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
index 9a7655c0f..0782b86e3 100644
--- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -5,7 +5,6 @@
#include "textflag.h"
// func matchLen(a []byte, b []byte) int
-// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
JB matchlen_match4_standalone
matchlen_loopback_standalone:
- MOVQ (AX)(SI*1), BX
- XORQ (CX)(SI*1), BX
- TESTQ BX, BX
- JZ matchlen_loop_standalone
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
#ifdef GOAMD64_v3
TZCNTQ BX, BX
#else
BSFQ BX, BX
#endif
- SARQ $0x03, BX
+ SHRL $0x03, BX
LEAL (SI)(BX*1), SI
JMP gen_match_len_end
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 974b99725..5b06174b8 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero:
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
deleted file mode 100644
index 5d8cb5b72..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
deleted file mode 100644
index e16fb946b..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
deleted file mode 100644
index 81be21437..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
- go test -cover -v -coverprofile=cover.dat ./...
- go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
deleted file mode 100644
index 7c08e564f..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "errors"
- "io"
-
- "google.golang.org/protobuf/proto"
-)
-
-// TODO: Give error package name prefix in next minor release.
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error. This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom. As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred). The function never
-// reads more bytes from the stream than required. The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so. In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
- // TODO: Consider allowing the caller to specify a decode buffer in the
- // next major version.
-
- // TODO: Consider using error wrapping to annotate error state in pass-
- // through cases in the next minor version.
-
- // Per AbstractParser#parsePartialDelimitedFrom with
- // CodedInputStream#readRawVarint32.
- var headerBuf [binary.MaxVarintLen32]byte
- var bytesRead, varIntBytes int
- var messageLength uint64
- for varIntBytes == 0 { // i.e. no varint has been decoded yet.
- if bytesRead >= len(headerBuf) {
- return bytesRead, errInvalidVarint
- }
- // We have to read byte by byte here to avoid reading more bytes
- // than required. Each read byte is appended to what we have
- // read before.
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
- if newBytesRead == 0 {
- if err != nil {
- return bytesRead, err
- }
- // A Reader should not return (0, nil); but if it does, it should
- // be treated as no-op according to the Reader contract.
- continue
- }
- bytesRead += newBytesRead
- // Now present everything read so far to the varint decoder and
- // see if a varint can be decoded already.
- messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead])
- }
-
- messageBuf := make([]byte, messageLength)
- newBytesRead, err := io.ReadFull(r, messageBuf)
- bytesRead += newBytesRead
- if err != nil {
- return bytesRead, err
- }
-
- return bytesRead, proto.Unmarshal(messageBuf, m)
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go
deleted file mode 100644
index c318385cb..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
deleted file mode 100644
index e58dd9d29..000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "io"
-
- "google.golang.org/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file. It returns the total
-// number of bytes written and any applicable error. This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
- // TODO: Consider allowing the caller to specify an encode buffer in the
- // next major version.
-
- buffer, err := proto.Marshal(m)
- if err != nil {
- return 0, err
- }
-
- var buf [binary.MaxVarintLen32]byte
- encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
- sync, err := w.Write(buf[:encodedLength])
- if err != nil {
- return sync, err
- }
-
- n, err = w.Write(buffer)
- return n + sync, err
-}
diff --git a/vendor/github.com/nats-io/nats.go/.travis.yml b/vendor/github.com/nats-io/nats.go/.travis.yml
deleted file mode 100644
index 9a6b4a89c..000000000
--- a/vendor/github.com/nats-io/nats.go/.travis.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-language: go
-go:
-- "1.22.x"
-- "1.21.x"
-go_import_path: github.com/nats-io/nats.go
-install:
-- go get -t ./...
-- curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
- go install github.com/mattn/goveralls@latest;
- go install github.com/wadey/gocovmerge@latest;
- go install honnef.co/go/tools/cmd/staticcheck@latest;
- go install github.com/client9/misspell/cmd/misspell@latest;
- fi
-before_script:
-- $(exit $(go fmt ./... | wc -l))
-- go vet -modfile=go_test.mod ./...
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then
- find . -type f -name "*.go" | xargs misspell -error -locale US;
- GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...;
- fi
-- golangci-lint run ./jetstream/...
-script:
-- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... --failfast -vet=off -tags=internal_testing; fi
-after_success:
-- if [[ "$TRAVIS_GO_VERSION" =~ 1.22 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi
-
-jobs:
- include:
- - name: "Go: 1.22.x (nats-server@main)"
- go: "1.22.x"
- before_script:
- - go get -modfile go_test.mod github.com/nats-io/nats-server/v2@main
- allow_failures:
- - name: "Go: 1.22.x (nats-server@main)"
diff --git a/vendor/github.com/nats-io/nats.go/README.md b/vendor/github.com/nats-io/nats.go/README.md
index fd64d93dc..fa087c34d 100644
--- a/vendor/github.com/nats-io/nats.go/README.md
+++ b/vendor/github.com/nats-io/nats.go/README.md
@@ -7,8 +7,8 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io
[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg
[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go
[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go
-[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go
-[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main
+[Build-Status-Url]: https://github.com/nats-io/nats.go/actions
+[Build-Status-Image]: https://github.com/nats-io/nats.go/actions/workflows/ci.yaml/badge.svg?branch=main
[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go
[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c
[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main
@@ -19,25 +19,14 @@ A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io
## Installation
```bash
-# Go client
-go get github.com/nats-io/nats.go/
+# To get the latest released Go client:
+go get github.com/nats-io/nats.go@latest
-# Server
-go get github.com/nats-io/nats-server
-```
-
-When using or transitioning to Go modules support:
-
-```bash
-# Go client latest or explicit version
-go get github.com/nats-io/nats.go/@latest
-go get github.com/nats-io/nats.go/@v1.36.0
-
-# For latest NATS Server, add /v2 at the end
-go get github.com/nats-io/nats-server/v2
+# To get a specific version:
+go get github.com/nats-io/nats.go@v1.39.0
-# NATS Server v1 is installed otherwise
-# go get github.com/nats-io/nats-server
+# Note that the latest major version for NATS Server is v2:
+go get github.com/nats-io/nats-server/v2@latest
```
## Basic Usage
@@ -93,11 +82,13 @@ nc.Close()
```
## JetStream
+[JetStream API reference](https://pkg.go.dev/github.com/nats-io/nats.go/jetstream)
JetStream is the built-in NATS persistence system. `nats.go` provides a built-in
API enabling both managing JetStream assets as well as publishing/consuming
persistent messages.
+
### Basic usage
```go
@@ -134,60 +125,6 @@ To find more information on `nats.go` JetStream API, visit
The service API (`micro`) allows you to [easily build NATS services](micro/README.md) The
services API is currently in beta release.
-## Encoded Connections
-
-```go
-
-nc, _ := nats.Connect(nats.DefaultURL)
-c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-defer c.Close()
-
-// Simple Publisher
-c.Publish("foo", "Hello World")
-
-// Simple Async Subscriber
-c.Subscribe("foo", func(s string) {
- fmt.Printf("Received a message: %s\n", s)
-})
-
-// EncodedConn can Publish any raw Go type using the registered Encoder
-type person struct {
- Name string
- Address string
- Age int
-}
-
-// Go type Subscriber
-c.Subscribe("hello", func(p *person) {
- fmt.Printf("Received a person: %+v\n", p)
-})
-
-me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"}
-
-// Go type Publisher
-c.Publish("hello", me)
-
-// Unsubscribe
-sub, err := c.Subscribe("foo", nil)
-// ...
-sub.Unsubscribe()
-
-// Requests
-var response string
-err = c.Request("help", "help me", &response, 10*time.Millisecond)
-if err != nil {
- fmt.Printf("Request failed: %v\n", err)
-}
-
-// Replying
-c.Subscribe("help", func(subj, reply string, msg string) {
- c.Publish(reply, "I can help!")
-})
-
-// Close connection
-c.Close();
-```
-
## New Authentication (Nkeys and User Credentials)
This requires server with version >= 2.0.0
@@ -267,34 +204,6 @@ if err != nil {
```
-## Using Go Channels (netchan)
-
-```go
-nc, _ := nats.Connect(nats.DefaultURL)
-ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-defer ec.Close()
-
-type person struct {
- Name string
- Address string
- Age int
-}
-
-recvCh := make(chan *person)
-ec.BindRecvChan("hello", recvCh)
-
-sendCh := make(chan *person)
-ec.BindSendChan("hello", sendCh)
-
-me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
-
-// Send via Go channels
-sendCh <- me
-
-// Receive via Go channels
-who := <- recvCh
-```
-
## Wildcard Subscriptions
```go
@@ -461,17 +370,6 @@ msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar"))
sub, err := nc.SubscribeSync("foo")
msg, err := sub.NextMsgWithContext(ctx)
-// Encoded Request with context
-c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
-type request struct {
- Message string `json:"message"`
-}
-type response struct {
- Code int `json:"code"`
-}
-req := &request{Message: "Hello"}
-resp := &response{}
-err := c.RequestWithContext(ctx, "foo", req, resp)
```
## Backwards compatibility
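
With the EncodedConn and netchan examples removed above (encoded connections are deprecated in this release), a rough equivalent using the core API and explicit JSON marshaling might look like the sketch below; the `person` type and subject name are illustrative, not taken from the upstream docs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

type person struct {
	Name    string `json:"name"`
	Address string `json:"address"`
	Age     int    `json:"age"`
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Subscribe and decode JSON payloads explicitly.
	sub, err := nc.Subscribe("hello", func(m *nats.Msg) {
		var p person
		if err := json.Unmarshal(m.Data, &p); err != nil {
			log.Printf("decode error: %v", err)
			return
		}
		fmt.Printf("Received a person: %+v\n", p)
	})
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// Encode and publish explicitly.
	me := person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
	data, _ := json.Marshal(me)
	if err := nc.Publish("hello", data); err != nil {
		log.Fatal(err)
	}

	// Give the async subscriber a moment before exiting.
	nc.Flush()
	time.Sleep(100 * time.Millisecond)
}
```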
diff --git a/vendor/github.com/nats-io/nats.go/context.go b/vendor/github.com/nats-io/nats.go/context.go
index 20f1782ac..382335e83 100644
--- a/vendor/github.com/nats-io/nats.go/context.go
+++ b/vendor/github.com/nats-io/nats.go/context.go
@@ -88,7 +88,7 @@ func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, dat
inbox := nc.NewInbox()
ch := make(chan *Msg, RequestChanLen)
- s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
+ s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, nil, true, nil)
if err != nil {
return nil, err
}
@@ -217,6 +217,8 @@ func (nc *Conn) FlushWithContext(ctx context.Context) error {
// RequestWithContext will create an Inbox and perform a Request
// using the provided cancellation context with the Inbox reply
// for the data v. A response will be decoded into the vPtr last parameter.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v any, vPtr any) error {
if ctx == nil {
return ErrInvalidContext
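
Since `EncodedConn.RequestWithContext` is now deprecated, context-aware requests go through the core connection instead. A minimal snippet, assuming `nc` is an already-connected `*nats.Conn` and some responder is listening on `"help"` (both assumptions for this sketch):

```go
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()

// Core-API request with a context; no encoder involved, payloads are []byte.
msg, err := nc.RequestWithContext(ctx, "help", []byte("help me"))
if err != nil {
	log.Printf("request failed: %v", err)
} else {
	fmt.Printf("reply: %s\n", msg.Data)
}
```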
diff --git a/vendor/github.com/nats-io/nats.go/dependencies.tpl b/vendor/github.com/nats-io/nats.go/dependencies.tpl
new file mode 100644
index 000000000..41cd3741a
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/dependencies.tpl
@@ -0,0 +1,9 @@
+# External Dependencies
+
+This file lists the dependencies used in this repository.
+
+{{/* compress actually has a BSD 3-Clause license, but the License file in the repo confuses the go-license tooling, hence the manual exception */}}
+| Dependency | License |
+|--------------------------------------------------|-----------------------------------------|
+{{ range . }}| {{ .Name }} | {{ if eq .Name "github.com/klauspost/compress/flate" }}BSD 3-Clause{{ else }}{{ .LicenseName }}{{ end }} |
+{{ end }}
diff --git a/vendor/github.com/nats-io/nats.go/enc.go b/vendor/github.com/nats-io/nats.go/enc.go
index 4550f618d..34a3fae7f 100644
--- a/vendor/github.com/nats-io/nats.go/enc.go
+++ b/vendor/github.com/nats-io/nats.go/enc.go
@@ -24,7 +24,11 @@ import (
"github.com/nats-io/nats.go/encoders/builtin"
)
+//lint:file-ignore SA1019 Ignore deprecation warnings for EncodedConn
+
// Encoder interface is for all register encoders
+//
+// Deprecated: Encoded connections are no longer supported.
type Encoder interface {
Encode(subject string, v any) ([]byte, error)
Decode(subject string, data []byte, vPtr any) error
@@ -51,6 +55,8 @@ func init() {
// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to
// a nats server and have an extendable encoder system that will encode and decode messages
// from raw Go types.
+//
+// Deprecated: Encoded connections are no longer supported.
type EncodedConn struct {
Conn *Conn
Enc Encoder
@@ -58,6 +64,8 @@ type EncodedConn struct {
// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered
// encoder.
+//
+// Deprecated: Encoded connections are no longer supported.
func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
if c == nil {
return nil, errors.New("nats: Nil Connection")
@@ -73,6 +81,8 @@ func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
}
// RegisterEncoder will register the encType with the given Encoder. Useful for customization.
+//
+// Deprecated: Encoded connections are no longer supported.
func RegisterEncoder(encType string, enc Encoder) {
encLock.Lock()
defer encLock.Unlock()
@@ -80,6 +90,8 @@ func RegisterEncoder(encType string, enc Encoder) {
}
// EncoderForType will return the registered Encoder for the encType.
+//
+// Deprecated: Encoded connections are no longer supported.
func EncoderForType(encType string) Encoder {
encLock.Lock()
defer encLock.Unlock()
@@ -88,6 +100,8 @@ func EncoderForType(encType string) Encoder {
// Publish publishes the data argument to the given subject. The data argument
// will be encoded using the associated encoder.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Publish(subject string, v any) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
@@ -99,6 +113,8 @@ func (c *EncodedConn) Publish(subject string, v any) error {
// PublishRequest will perform a Publish() expecting a response on the
// reply subject. Use Request() for automatically waiting for a response
// inline.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) PublishRequest(subject, reply string, v any) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
@@ -110,6 +126,8 @@ func (c *EncodedConn) PublishRequest(subject, reply string, v any) error {
// Request will create an Inbox and perform a Request() call
// with the Inbox reply for the data v. A response will be
// decoded into the vPtr Response.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Duration) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
@@ -150,6 +168,8 @@ func (c *EncodedConn) Request(subject string, v any, vPtr any, timeout time.Dura
// and demarshal it into the given struct, e.g. person.
// There are also variants where the callback wants either the subject, or the
// subject and the reply subject.
+//
+// Deprecated: Encoded connections are no longer supported.
type Handler any
// Dissect the cb Handler's signature
@@ -170,6 +190,8 @@ var emptyMsgType = reflect.TypeOf(&Msg{})
// Subscribe will create a subscription on the given subject and process incoming
// messages using the specified Handler. The Handler should be a func that matches
// a signature from the description of Handler from above.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, _EMPTY_, cb)
}
@@ -177,6 +199,8 @@ func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, erro
// QueueSubscribe will create a queue subscription on the given subject and process
// incoming messages using the specified Handler. The Handler should be a func that
// matches a signature from the description of Handler from above.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, queue, cb)
}
@@ -234,22 +258,28 @@ func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscriptio
cbValue.Call(oV)
}
- return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil)
+ return c.Conn.subscribe(subject, queue, natsCB, nil, nil, false, nil)
}
// FlushTimeout allows a Flush operation to have an associated timeout.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) {
return c.Conn.FlushTimeout(timeout)
}
// Flush will perform a round trip to the server and return when it
// receives the internal reply.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Flush() error {
return c.Conn.Flush()
}
// Close will close the connection to the server. This call will release
// all blocking calls, such as Flush(), etc.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Close() {
c.Conn.Close()
}
@@ -259,11 +289,15 @@ func (c *EncodedConn) Close() {
// will be drained and can not publish any additional messages. Upon draining
// of the publishers, the connection will be closed. Use the ClosedCB()
// option to know when the connection has moved from draining to closed.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) Drain() error {
return c.Conn.Drain()
}
// LastError reports the last error encountered via the Connection.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) LastError() error {
return c.Conn.LastError()
}
diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
index c1d0f6f0b..e73113da8 100644
--- a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
+++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go
@@ -26,6 +26,8 @@ import (
// turn numbers into appropriate strings that can be decoded. It will also
// properly encoded and decode bools. If will encode a struct, but if you want
// to properly handle structures you should use JsonEncoder.
+//
+// Deprecated: Encoded connections are no longer supported.
type DefaultEncoder struct {
// Empty
}
@@ -35,6 +37,8 @@ var falseB = []byte("false")
var nilB = []byte("")
// Encode
+//
+// Deprecated: Encoded connections are no longer supported.
func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) {
switch arg := v.(type) {
case string:
@@ -58,6 +62,8 @@ func (je *DefaultEncoder) Encode(subject string, v any) ([]byte, error) {
}
// Decode
+//
+// Deprecated: Encoded connections are no longer supported.
func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr any) error {
// Figure out what it's pointing to...
sData := *(*string)(unsafe.Pointer(&data))
diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
index 7ecf85e4d..e2e8c3202 100644
--- a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
+++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
@@ -21,6 +21,8 @@ import (
// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/gob to Marshal
// and Unmarshal most types, including structs.
+//
+// Deprecated: Encoded connections are no longer supported.
type GobEncoder struct {
// Empty
}
@@ -28,6 +30,8 @@ type GobEncoder struct {
// FIXME(dlc) - This could probably be more efficient.
// Encode
+//
+// Deprecated: Encoded connections are no longer supported.
func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) {
b := new(bytes.Buffer)
enc := gob.NewEncoder(b)
@@ -38,6 +42,8 @@ func (ge *GobEncoder) Encode(subject string, v any) ([]byte, error) {
}
// Decode
+//
+// Deprecated: Encoded connections are no longer supported.
func (ge *GobEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
dec := gob.NewDecoder(bytes.NewBuffer(data))
err = dec.Decode(vPtr)
diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go
index 0540d9850..8e4c852a4 100644
--- a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go
+++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go
@@ -21,11 +21,15 @@ import (
// JsonEncoder is a JSON Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/json to Marshal
// and Unmarshal most types, including structs.
+//
+// Deprecated: Encoded connections are no longer supported.
type JsonEncoder struct {
// Empty
}
// Encode
+//
+// Deprecated: Encoded connections are no longer supported.
func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) {
b, err := json.Marshal(v)
if err != nil {
@@ -35,6 +39,8 @@ func (je *JsonEncoder) Encode(subject string, v any) ([]byte, error) {
}
// Decode
+//
+// Deprecated: Encoded connections are no longer supported.
func (je *JsonEncoder) Decode(subject string, data []byte, vPtr any) (err error) {
switch arg := vPtr.(type) {
case *string:
diff --git a/vendor/github.com/nats-io/nats.go/go_test.mod b/vendor/github.com/nats-io/nats.go/go_test.mod
index f5b731dd0..32c1003a5 100644
--- a/vendor/github.com/nats-io/nats.go/go_test.mod
+++ b/vendor/github.com/nats-io/nats.go/go_test.mod
@@ -1,23 +1,23 @@
module github.com/nats-io/nats.go
-go 1.19
+go 1.22.0
require (
github.com/golang/protobuf v1.4.2
- github.com/klauspost/compress v1.17.8
+ github.com/klauspost/compress v1.17.11
github.com/nats-io/jwt v1.2.2
- github.com/nats-io/nats-server/v2 v2.10.16
- github.com/nats-io/nkeys v0.4.7
+ github.com/nats-io/nats-server/v2 v2.10.24
+ github.com/nats-io/nkeys v0.4.9
github.com/nats-io/nuid v1.0.1
go.uber.org/goleak v1.3.0
- golang.org/x/text v0.15.0
+ golang.org/x/text v0.21.0
google.golang.org/protobuf v1.23.0
)
require (
- github.com/minio/highwayhash v1.0.2 // indirect
- github.com/nats-io/jwt/v2 v2.5.7 // indirect
- golang.org/x/crypto v0.23.0 // indirect
- golang.org/x/sys v0.20.0 // indirect
- golang.org/x/time v0.5.0 // indirect
+ github.com/minio/highwayhash v1.0.3 // indirect
+ github.com/nats-io/jwt/v2 v2.7.3 // indirect
+ golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/time v0.8.0 // indirect
)
diff --git a/vendor/github.com/nats-io/nats.go/go_test.sum b/vendor/github.com/nats-io/nats.go/go_test.sum
index f89d755ba..f6223f25b 100644
--- a/vendor/github.com/nats-io/nats.go/go_test.sum
+++ b/vendor/github.com/nats-io/nats.go/go_test.sum
@@ -1,4 +1,5 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
@@ -10,40 +11,42 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
-github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
+github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
github.com/nats-io/jwt v1.2.2 h1:w3GMTO969dFg+UOKTmmyuu7IGdusK+7Ytlt//OYH/uU=
github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
-github.com/nats-io/jwt/v2 v2.5.7 h1:j5lH1fUXCnJnY8SsQeB/a/z9Azgu2bYIDvtPVNdxe2c=
-github.com/nats-io/jwt/v2 v2.5.7/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
-github.com/nats-io/nats-server/v2 v2.10.16 h1:2jXaiydp5oB/nAx/Ytf9fdCi9QN6ItIc9eehX8kwVV0=
-github.com/nats-io/nats-server/v2 v2.10.16/go.mod h1:Pksi38H2+6xLe1vQx0/EA4bzetM0NqyIHcIbmgXSkIU=
+github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
+github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
+github.com/nats-io/nats-server/v2 v2.10.24 h1:KcqqQAD0ZZcG4yLxtvSFJY7CYKVYlnlWoAiVZ6i/IY4=
+github.com/nats-io/nats-server/v2 v2.10.24/go.mod h1:olvKt8E5ZlnjyqBGbAXtxvSQKsPodISK5Eo/euIta4s=
github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
-github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
-github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
+github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
+github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
-golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
-golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
-golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
+golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -54,3 +57,4 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go
index 5f8dfe3ee..fe246aaea 100644
--- a/vendor/github.com/nats-io/nats.go/js.go
+++ b/vendor/github.com/nats-io/nats.go/js.go
@@ -1,4 +1,4 @@
-// Copyright 2020-2023 The NATS Authors
+// Copyright 2020-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -58,6 +58,19 @@ type JetStream interface {
// PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd.
PublishAsyncComplete() <-chan struct{}
+ // CleanupPublisher will cleanup the publishing side of JetStreamContext.
+ //
+ // This will unsubscribe from the internal reply subject if needed.
+ // All pending async publishes will fail with ErrJetStreamPublisherClosed.
+ //
+ // If an error handler was provided, it will be called for each pending async
+ // publish and PublishAsyncComplete will be closed.
+ //
+ // After completion, the JetStreamContext is still usable: the internal
+ // subscription will be recreated on the next publish, but acks from previous
+ // publishes will be lost.
+ CleanupPublisher()
+
// Subscribe creates an async Subscription for JetStream.
// The stream and consumer names can be provided with the nats.Bind() option.
// For creating an ephemeral (where the consumer name is picked by the server),
@@ -107,6 +120,7 @@ type JetStream interface {
// PullSubscribe creates a Subscription that can fetch messages.
// See important note in Subscribe(). Additionally, for an ephemeral pull consumer, the "durable" value must be
// set to an empty string.
+ // When using PullSubscribe, the messages are fetched using Fetch() and FetchBatch() methods.
PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error)
}
@@ -466,6 +480,9 @@ type pubOpts struct {
// stallWait is the max wait of a async pub ack.
stallWait time.Duration
+
+ // internal option to re-use existing paf in case of retry.
+ pafRetry *pubAckFuture
}
// pubAckResponse is the ack response from the JetStream API when publishing a message.
@@ -531,7 +548,7 @@ func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
o.ttl = js.opts.wait
}
if o.stallWait > 0 {
- return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish")
+ return nil, errors.New("nats: stall wait cannot be set to sync publish")
}
if o.id != _EMPTY_ {
@@ -620,13 +637,17 @@ type PubAckFuture interface {
}
type pubAckFuture struct {
- js *js
- msg *Msg
- pa *PubAck
- st time.Time
- err error
- errCh chan error
- doneCh chan *PubAck
+ js *js
+ msg *Msg
+ pa *PubAck
+ st time.Time
+ err error
+ errCh chan error
+ doneCh chan *PubAck
+ retries int
+ maxRetries int
+ retryWait time.Duration
+ reply string
}
func (paf *pubAckFuture) Ok() <-chan *PubAck {
@@ -719,10 +740,7 @@ func (js *js) resetPendingAcksOnReconnect() {
paf.errCh <- paf.err
}
if errCb != nil {
- // clear reply subject so that new one is created on republish
- js.mu.Unlock()
- errCb(js, paf.msg, ErrDisconnected)
- js.mu.Lock()
+ defer errCb(js, paf.msg, ErrDisconnected)
}
delete(js.pafs, id)
}
@@ -734,6 +752,38 @@ func (js *js) resetPendingAcksOnReconnect() {
}
}
+// CleanupPublisher will cleanup the publishing side of JetStreamContext.
+//
+// This will unsubscribe from the internal reply subject if needed.
+// All pending async publishes will fail with ErrJetStreamPublisherClosed.
+//
+// If an error handler was provided, it will be called for each pending async
+// publish and PublishAsyncComplete will be closed.
+//
+// After completion, the JetStreamContext is still usable: the internal
+// subscription will be recreated on the next publish, but acks from previous
+// publishes will be lost.
+func (js *js) CleanupPublisher() {
+ js.cleanupReplySub()
+ js.mu.Lock()
+ errCb := js.opts.aecb
+ for id, paf := range js.pafs {
+ paf.err = ErrJetStreamPublisherClosed
+ if paf.errCh != nil {
+ paf.errCh <- paf.err
+ }
+ if errCb != nil {
+ defer errCb(js, paf.msg, ErrJetStreamPublisherClosed)
+ }
+ delete(js.pafs, id)
+ }
+ if js.dch != nil {
+ close(js.dch)
+ js.dch = nil
+ }
+ js.mu.Unlock()
+}
+
func (js *js) cleanupReplySub() {
js.mu.Lock()
if js.rsub != nil {
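
A usage sketch for the new `CleanupPublisher` above, combined with the async-publish retry options (`RetryAttempts`, `RetryWait`) that feed the `pafRetry` path in this change. The stream/subject names are illustrative and `nc` is assumed to be a connected `*nats.Conn`:

```go
js, err := nc.JetStream()
if err != nil {
	log.Fatal(err)
}

// Async publish; RetryAttempts/RetryWait drive retries when the server
// answers with "no responders".
paf, err := js.PublishAsync("ORDERS.new", []byte("order-1"),
	nats.RetryAttempts(3),
	nats.RetryWait(250*time.Millisecond),
)
if err != nil {
	log.Fatal(err)
}

select {
case ack := <-paf.Ok():
	fmt.Printf("acked: stream=%s seq=%d\n", ack.Stream, ack.Sequence)
case err := <-paf.Err():
	log.Printf("publish failed: %v", err)
case <-time.After(5 * time.Second):
	log.Println("no ack yet")
}

// Tear down the publishing side: pending futures fail with
// ErrJetStreamPublisherClosed and the internal reply sub is dropped.
js.CleanupPublisher()
```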
@@ -806,20 +856,30 @@ func (js *js) handleAsyncReply(m *Msg) {
js.mu.Unlock()
return
}
- // Remove
- delete(js.pafs, id)
- // Check on anyone stalled and waiting.
- if js.stc != nil && len(js.pafs) < js.opts.maxpa {
- close(js.stc)
- js.stc = nil
+ closeStc := func() {
+ // Check on anyone stalled and waiting.
+ if js.stc != nil && len(js.pafs) < js.opts.maxpa {
+ close(js.stc)
+ js.stc = nil
+ }
}
- // Check on anyone one waiting on done status.
- if js.dch != nil && len(js.pafs) == 0 {
- dch := js.dch
- js.dch = nil
- // Defer here so error is processed and can be checked.
- defer close(dch)
+
+ closeDchFn := func() func() {
+ var dch chan struct{}
+ // Check on anyone waiting on done status.
+ if js.dch != nil && len(js.pafs) == 0 {
+ dch = js.dch
+ js.dch = nil
+ }
+ // Return function to close done channel which
+ // should be deferred so that error is processed and
+ // can be checked.
+ return func() {
+ if dch != nil {
+ close(dch)
+ }
+ }
}
doErr := func(err error) {
@@ -836,10 +896,39 @@ func (js *js) handleAsyncReply(m *Msg) {
// Process no responders etc.
if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
+ if paf.retries < paf.maxRetries {
+ paf.retries++
+ time.AfterFunc(paf.retryWait, func() {
+ js.mu.Lock()
+ paf := js.getPAF(id)
+ js.mu.Unlock()
+ if paf == nil {
+ return
+ }
+ _, err := js.PublishMsgAsync(paf.msg, pubOptFn(func(po *pubOpts) error {
+ po.pafRetry = paf
+ return nil
+ }))
+ if err != nil {
+ js.mu.Lock()
+ doErr(err)
+ }
+ })
+ js.mu.Unlock()
+ return
+ }
+ delete(js.pafs, id)
+ closeStc()
+ defer closeDchFn()()
doErr(ErrNoResponders)
return
}
+ // Remove
+ delete(js.pafs, id)
+ closeStc()
+ defer closeDchFn()()
+
var pa pubAckResponse
if err := json.Unmarshal(m.Data, &pa); err != nil {
doErr(ErrInvalidJSAck)
@@ -906,6 +995,10 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
}
}
+ if o.rnum < 0 {
+ return nil, fmt.Errorf("%w: retry attempts cannot be negative", ErrInvalidArg)
+ }
+
// Timeouts and contexts do not make sense for these.
if o.ttl != 0 || o.ctx != nil {
return nil, ErrContextAndTimeout
@@ -933,30 +1026,42 @@ func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
}
// Reply
- if m.Reply != _EMPTY_ {
+ paf := o.pafRetry
+ if paf == nil && m.Reply != _EMPTY_ {
return nil, errors.New("nats: reply subject should be empty")
}
- reply := m.Reply
- m.Reply = js.newAsyncReply()
- defer func() { m.Reply = reply }()
+ var id string
+ var reply string
- if m.Reply == _EMPTY_ {
- return nil, errors.New("nats: error creating async reply handler")
- }
+ // register new paf if not retrying
+ if paf == nil {
+ reply = js.newAsyncReply()
- id := m.Reply[js.replyPrefixLen:]
- paf := &pubAckFuture{msg: m, st: time.Now()}
- numPending, maxPending := js.registerPAF(id, paf)
+ if reply == _EMPTY_ {
+ return nil, errors.New("nats: error creating async reply handler")
+ }
+
+ id = reply[js.replyPrefixLen:]
+ paf = &pubAckFuture{msg: m, st: time.Now(), maxRetries: o.rnum, retryWait: o.rwait, reply: reply}
+ numPending, maxPending := js.registerPAF(id, paf)
- if maxPending > 0 && numPending >= maxPending {
- select {
- case <-js.asyncStall():
- case <-time.After(stallWait):
- js.clearPAF(id)
- return nil, errors.New("nats: stalled with too many outstanding async published messages")
+ if maxPending > 0 && numPending > maxPending {
+ select {
+ case <-js.asyncStall():
+ case <-time.After(stallWait):
+ js.clearPAF(id)
+ return nil, errors.New("nats: stalled with too many outstanding async published messages")
+ }
}
+ } else {
+ reply = paf.reply
+ id = reply[js.replyPrefixLen:]
+ }
+ hdr, err := m.headerBytes()
+ if err != nil {
+ return nil, err
}
- if err := js.nc.PublishMsg(m); err != nil {
+ if err := js.nc.publish(m.Subject, reply, hdr, m.Data); err != nil {
js.clearPAF(id)
return nil, err
}
@@ -1039,7 +1144,7 @@ func RetryAttempts(num int) PubOpt {
func StallWait(ttl time.Duration) PubOpt {
return pubOptFn(func(opts *pubOpts) error {
if ttl <= 0 {
- return fmt.Errorf("nats: stall wait should be more than 0")
+ return errors.New("nats: stall wait should be more than 0")
}
opts.stallWait = ttl
return nil
@@ -1397,11 +1502,11 @@ func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode boo
// Prevent an user from attempting to create a queue subscription on
// a JS consumer that was not created with a deliver group.
if queue != _EMPTY_ {
- return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group")
+ return _EMPTY_, errors.New("cannot create a queue subscription for a consumer without a deliver group")
} else if info.PushBound {
// Need to reject a non queue subscription to a non queue consumer
// if the consumer is already bound.
- return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription")
+ return _EMPTY_, errors.New("consumer is already bound to a subscription")
}
} else {
// If the JS consumer has a deliver group, we need to fail a non queue
@@ -1423,7 +1528,7 @@ func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode boo
func checkConfig(s, u *ConsumerConfig) error {
makeErr := func(fieldName string, usrVal, srvVal any) error {
- return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal)
+ return fmt.Errorf("nats: configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal)
}
if u.Durable != _EMPTY_ && u.Durable != s.Durable {
@@ -1503,7 +1608,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
// If no stream name is specified, the subject cannot be empty.
if subj == _EMPTY_ && o.stream == _EMPTY_ {
- return nil, fmt.Errorf("nats: subject required")
+ return nil, errors.New("nats: subject required")
}
// Note that these may change based on the consumer info response we may get.
@@ -1525,7 +1630,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
// would subscribe to and server would send on.
if o.cfg.Heartbeat > 0 || o.cfg.FlowControl {
// Not making this a public ErrXXX in case we allow in the future.
- return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control")
+ return nil, errors.New("nats: queue subscription doesn't support idle heartbeat nor flow control")
}
// If this is a queue subscription and no consumer nor durable name was specified,
@@ -1563,31 +1668,31 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
if o.ordered {
// Make sure we are not durable.
if isDurable {
- return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer")
+ return nil, errors.New("nats: durable can not be set for an ordered consumer")
}
// Check ack policy.
if o.cfg.AckPolicy != ackPolicyNotSet {
- return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer")
+ return nil, errors.New("nats: ack policy can not be set for an ordered consumer")
}
// Check max deliver.
if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 {
- return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer")
+ return nil, errors.New("nats: max deliver can not be set for an ordered consumer")
}
// No deliver subject, we pick our own.
if o.cfg.DeliverSubject != _EMPTY_ {
- return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer")
+ return nil, errors.New("nats: deliver subject can not be set for an ordered consumer")
}
// Queue groups not allowed.
if queue != _EMPTY_ {
- return nil, fmt.Errorf("nats: queues not be set for an ordered consumer")
+ return nil, errors.New("nats: queues not be set for an ordered consumer")
}
// Check for bound consumers.
if consumer != _EMPTY_ {
- return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer")
+ return nil, errors.New("nats: can not bind existing consumer for an ordered consumer")
}
// Check for pull mode.
if isPullMode {
- return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer")
+ return nil, errors.New("nats: can not use pull mode for an ordered consumer")
}
// Setup how we need it to be here.
o.cfg.FlowControl = true
@@ -1735,7 +1840,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
ocb := cb
cb = func(m *Msg) { ocb(m); m.Ack() }
}
- sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi)
+ sub, err := nc.subscribe(deliver, queue, cb, ch, nil, isSync, jsi)
if err != nil {
return nil, err
}
@@ -1806,7 +1911,7 @@ func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync,
jsi.hbi = info.Config.Heartbeat
// Recreate the subscription here.
- sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi)
+ sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, nil, isSync, jsi)
if err != nil {
return nil, err
}
@@ -2321,7 +2426,7 @@ func Description(description string) SubOpt {
func Durable(consumer string) SubOpt {
return subOptFn(func(opts *subOpts) error {
if opts.cfg.Durable != _EMPTY_ {
- return fmt.Errorf("nats: option Durable set more than once")
+ return errors.New("nats: option Durable set more than once")
}
if opts.consumer != _EMPTY_ && opts.consumer != consumer {
return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer)
@@ -2899,10 +3004,11 @@ func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) {
}
// Make our request expiration a bit shorter than the current timeout.
- expires := ttl
- if ttl >= 20*time.Millisecond {
- expires = ttl - 10*time.Millisecond
+ expiresDiff := time.Duration(float64(ttl) * 0.1)
+ if expiresDiff > 5*time.Second {
+ expiresDiff = 5 * time.Second
}
+ expires := ttl - expiresDiff
nr.Batch = batch - len(msgs)
nr.Expires = expires
@@ -3009,20 +3115,27 @@ type MessageBatch interface {
}
type messageBatch struct {
+ sync.Mutex
msgs chan *Msg
err error
done chan struct{}
}
func (mb *messageBatch) Messages() <-chan *Msg {
+ mb.Lock()
+ defer mb.Unlock()
return mb.msgs
}
func (mb *messageBatch) Error() error {
+ mb.Lock()
+ defer mb.Unlock()
return mb.err
}
func (mb *messageBatch) Done() <-chan struct{} {
+ mb.Lock()
+ defer mb.Unlock()
return mb.done
}
@@ -3166,10 +3279,11 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
ttl = time.Until(deadline)
// Make our request expiration a bit shorter than the current timeout.
- expires := ttl
- if ttl >= 20*time.Millisecond {
- expires = ttl - 10*time.Millisecond
+ expiresDiff := time.Duration(float64(ttl) * 0.1)
+ if expiresDiff > 5*time.Second {
+ expiresDiff = 5 * time.Second
}
+ expires := ttl - expiresDiff
requestBatch := batch - len(result.msgs)
req := nextRequest{
@@ -3196,12 +3310,11 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
}
var hbTimer *time.Timer
var hbErr error
- hbLock := sync.Mutex{}
if o.hb > 0 {
hbTimer = time.AfterFunc(2*o.hb, func() {
- hbLock.Lock()
+ result.Lock()
hbErr = ErrNoHeartbeat
- hbLock.Unlock()
+ result.Unlock()
cancel()
})
}
@@ -3232,21 +3345,25 @@ func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, e
break
}
if usrMsg {
+ result.Lock()
result.msgs <- msg
+ result.Unlock()
requestMsgs++
}
}
if err != nil {
- hbLock.Lock()
+ result.Lock()
if hbErr != nil {
result.err = hbErr
} else {
result.err = o.checkCtxErr(err)
}
- hbLock.Unlock()
+ result.Unlock()
}
close(result.msgs)
+ result.Lock()
result.done <- struct{}{}
+ result.Unlock()
}()
return result, nil
}
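
For reference, a `FetchBatch` sketch; the mutex now embedded in `messageBatch` is what lets the heartbeat timer and the fetch goroutine above touch `msgs`/`err`/`done` safely. Subject and durable names are illustrative, and `js` is an assumed `nats.JetStreamContext`:

```go
sub, err := js.PullSubscribe("ORDERS.*", "worker")
if err != nil {
	log.Fatal(err)
}

// Ask for up to 10 messages, waiting at most 2s; the request expiry is
// trimmed by ~10% (capped at 5s) as computed above.
batch, err := sub.FetchBatch(10, nats.MaxWait(2*time.Second))
if err != nil {
	log.Fatal(err)
}
for msg := range batch.Messages() {
	fmt.Printf("got: %s\n", msg.Data)
	msg.Ack()
}
if err := batch.Error(); err != nil {
	log.Printf("fetch ended with: %v", err)
}
```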
@@ -3844,7 +3961,7 @@ func (alg StoreCompression) MarshalJSON() ([]byte, error) {
case NoCompression:
str = "none"
default:
- return nil, fmt.Errorf("unknown compression algorithm")
+ return nil, errors.New("unknown compression algorithm")
}
return json.Marshal(str)
}
@@ -3860,7 +3977,7 @@ func (alg *StoreCompression) UnmarshalJSON(b []byte) error {
case "none":
*alg = NoCompression
default:
- return fmt.Errorf("unknown compression algorithm")
+ return errors.New("unknown compression algorithm")
}
return nil
}
diff --git a/vendor/github.com/nats-io/nats.go/jserrors.go b/vendor/github.com/nats-io/nats.go/jserrors.go
index f0285943b..1c22d812b 100644
--- a/vendor/github.com/nats-io/nats.go/jserrors.go
+++ b/vendor/github.com/nats-io/nats.go/jserrors.go
@@ -151,7 +151,10 @@ var (
// ErrSubscriptionClosed is returned when attempting to send pull request to a closed subscription
ErrSubscriptionClosed JetStreamError = &jsError{message: "subscription closed"}
- // DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases.
+ // ErrJetStreamPublisherClosed is returned for each unfinished ack future when CleanupPublisher is called.
+ ErrJetStreamPublisherClosed JetStreamError = &jsError{message: "jetstream context closed"}
+
+ // Deprecated: ErrInvalidDurableName is no longer returned and will be removed in future releases.
// Use ErrInvalidConsumerName instead.
ErrInvalidDurableName = errors.New("nats: invalid durable name")
)
diff --git a/vendor/github.com/nats-io/nats.go/jsm.go b/vendor/github.com/nats-io/nats.go/jsm.go
index 9eb5d4b4b..2ae19c7a3 100644
--- a/vendor/github.com/nats-io/nats.go/jsm.go
+++ b/vendor/github.com/nats-io/nats.go/jsm.go
@@ -41,7 +41,7 @@ type JetStreamManager interface {
PurgeStream(name string, opts ...JSOpt) error
// StreamsInfo can be used to retrieve a list of StreamInfo objects.
- // DEPRECATED: Use Streams() instead.
+ // Deprecated: Use Streams() instead.
StreamsInfo(opts ...JSOpt) <-chan *StreamInfo
// Streams can be used to retrieve a list of StreamInfo objects.
@@ -86,7 +86,7 @@ type JetStreamManager interface {
ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error)
// ConsumersInfo is used to retrieve a list of ConsumerInfo objects.
- // DEPRECATED: Use Consumers() instead.
+ // Deprecated: Use Consumers() instead.
ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo
// Consumers is used to retrieve a list of ConsumerInfo objects.
@@ -240,7 +240,7 @@ type StreamConfig struct {
// v2.10.0 or later.
Metadata map[string]string `json:"metadata,omitempty"`
- // Template identifies the template that manages the Stream. DEPRECATED:
+ // Template identifies the template that manages the Stream. Deprecated:
// This feature is no longer supported.
Template string `json:"template_owner,omitempty"`
}
@@ -747,7 +747,7 @@ func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo {
}
// ConsumersInfo is used to retrieve a list of ConsumerInfo objects.
-// DEPRECATED: Use Consumers() instead.
+// Deprecated: Use Consumers() instead.
func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo {
return jsc.Consumers(stream, opts...)
}
@@ -1330,11 +1330,11 @@ func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error
// Check for headers that give us the required information to
// reconstruct the message.
if len(r.Header) == 0 {
- return nil, fmt.Errorf("nats: response should have headers")
+ return nil, errors.New("nats: response should have headers")
}
stream := r.Header.Get(JSStream)
if stream == _EMPTY_ {
- return nil, fmt.Errorf("nats: missing stream header")
+ return nil, errors.New("nats: missing stream header")
}
// Mirrors can now answer direct gets, so removing check for name equality.
@@ -1342,7 +1342,7 @@ func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error
seqStr := r.Header.Get(JSSequence)
if seqStr == _EMPTY_ {
- return nil, fmt.Errorf("nats: missing sequence header")
+ return nil, errors.New("nats: missing sequence header")
}
seq, err := strconv.ParseUint(seqStr, 10, 64)
if err != nil {
@@ -1350,7 +1350,7 @@ func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error
}
timeStr := r.Header.Get(JSTimeStamp)
if timeStr == _EMPTY_ {
- return nil, fmt.Errorf("nats: missing timestamp header")
+ return nil, errors.New("nats: missing timestamp header")
}
// Temporary code: the server in main branch is sending with format
// "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed
@@ -1365,7 +1365,7 @@ func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error
}
subj := r.Header.Get(JSSubject)
if subj == _EMPTY_ {
- return nil, fmt.Errorf("nats: missing subject header")
+ return nil, errors.New("nats: missing subject header")
}
return &RawStreamMsg{
Subject: subj,
@@ -1617,7 +1617,7 @@ func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo {
}
// StreamsInfo can be used to retrieve a list of StreamInfo objects.
-// DEPRECATED: Use Streams() instead.
+// Deprecated: Use Streams() instead.
func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo {
return jsc.Streams(opts...)
}
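
With `StreamsInfo`/`ConsumersInfo` now carrying proper `Deprecated:` markers (so tooling such as staticcheck flags callers), the replacements are drop-in. A short sketch, with the `"ORDERS"` stream name being illustrative:

```go
// Streams() returns the same channel of *nats.StreamInfo as the deprecated
// StreamsInfo(); Consumers() likewise replaces ConsumersInfo().
for info := range js.Streams() {
	fmt.Printf("stream %q: %d msgs\n", info.Config.Name, info.State.Msgs)
}
for ci := range js.Consumers("ORDERS") {
	fmt.Printf("consumer %q pending=%d\n", ci.Name, ci.NumPending)
}
```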
diff --git a/vendor/github.com/nats-io/nats.go/kv.go b/vendor/github.com/nats-io/nats.go/kv.go
index d9f40fdee..bcb283ff8 100644
--- a/vendor/github.com/nats-io/nats.go/kv.go
+++ b/vendor/github.com/nats-io/nats.go/kv.go
@@ -54,6 +54,7 @@ type KeyValue interface {
// Create will add the key/value pair iff it does not exist.
Create(key string, value []byte) (revision uint64, err error)
// Update will update the value iff the latest revision matches.
+ // Update also resets the TTL associated with the key (if any).
Update(key string, value []byte, last uint64) (revision uint64, err error)
// Delete will place a delete marker and leave all revisions.
Delete(key string, opts ...DeleteOpt) error
@@ -64,8 +65,11 @@ type KeyValue interface {
Watch(keys string, opts ...WatchOpt) (KeyWatcher, error)
// WatchAll will invoke the callback for all updates.
WatchAll(opts ...WatchOpt) (KeyWatcher, error)
+ // WatchFiltered will watch for any updates to keys that match the keys
+ // argument. It can be configured with the same options as Watch.
+ WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error)
// Keys will return all keys.
- // DEPRECATED: Use ListKeys instead to avoid memory issues.
+ // Deprecated: Use ListKeys instead to avoid memory issues.
Keys(opts ...WatchOpt) ([]string, error)
// ListKeys will return all keys in a channel.
ListKeys(opts ...WatchOpt) (KeyLister, error)
@@ -963,11 +967,11 @@ func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) {
return kv.Watch(AllKeys, opts...)
}
-// Watch will fire the callback when a key that matches the keys pattern is updated.
-// keys needs to be a valid NATS subject.
-func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
- if !searchKeyValid(keys) {
- return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "keys cannot be empty and must be a valid NATS subject")
+func (kv *kvs) WatchFiltered(keys []string, opts ...WatchOpt) (KeyWatcher, error) {
+ for _, key := range keys {
+ if !searchKeyValid(key) {
+ return nil, fmt.Errorf("%w: %s", ErrInvalidKey, "key cannot be empty and must be a valid NATS subject")
+ }
}
var o watchOpts
for _, opt := range opts {
@@ -979,10 +983,20 @@ func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
}
// Could be a pattern so don't check for validity as we normally do.
- var b strings.Builder
- b.WriteString(kv.pre)
- b.WriteString(keys)
- keys = b.String()
+ for i, key := range keys {
+ var b strings.Builder
+ b.WriteString(kv.pre)
+ b.WriteString(key)
+ keys[i] = b.String()
+ }
+
+ // if no keys are provided, watch all keys
+ if len(keys) == 0 {
+ var b strings.Builder
+ b.WriteString(kv.pre)
+ b.WriteString(AllKeys)
+ keys = []string{b.String()}
+ }
// We will block below on placing items on the chan. That is by design.
w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx}
@@ -1055,7 +1069,14 @@ func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
// update() callback.
w.mu.Lock()
defer w.mu.Unlock()
- sub, err := kv.js.Subscribe(keys, update, subOpts...)
+ var sub *Subscription
+ var err error
+ if len(keys) == 1 {
+ sub, err = kv.js.Subscribe(keys[0], update, subOpts...)
+ } else {
+ subOpts = append(subOpts, ConsumerFilterSubjects(keys...))
+ sub, err = kv.js.Subscribe("", update, subOpts...)
+ }
if err != nil {
return nil, err
}
@@ -1082,6 +1103,12 @@ func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
return w, nil
}
+// Watch will fire the callback when a key that matches the keys pattern is updated.
+// keys needs to be a valid NATS subject.
+func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) {
+ return kv.WatchFiltered([]string{keys}, opts...)
+}
+
// Bucket returns the current bucket name (JetStream stream).
func (kv *kvs) Bucket() string {
return kv.name
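
A sketch of the new `WatchFiltered`, which multiplexes several key patterns onto one watcher via `ConsumerFilterSubjects`. The bucket and key patterns are illustrative, and `js` is an assumed `nats.JetStreamContext`:

```go
kv, err := js.KeyValue("settings")
if err != nil {
	log.Fatal(err)
}

w, err := kv.WatchFiltered([]string{"config.>", "feature.*"})
if err != nil {
	log.Fatal(err)
}
defer w.Stop()

for entry := range w.Updates() {
	if entry == nil {
		// A nil entry marks the end of the initial replay of current values.
		continue
	}
	fmt.Printf("%s rev %d: %s\n", entry.Key(), entry.Revision(), entry.Value())
}
```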
diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go
index d019cee49..2b9533cd5 100644
--- a/vendor/github.com/nats-io/nats.go/nats.go
+++ b/vendor/github.com/nats-io/nats.go/nats.go
@@ -47,7 +47,7 @@ import (
// Default Constants
const (
- Version = "1.36.0"
+ Version = "1.39.0"
DefaultURL = "nats://127.0.0.1:4222"
DefaultPort = 4222
DefaultMaxReconnect = 60
@@ -86,6 +86,9 @@ const (
// MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit
MAX_CONNECTIONS_ERR = "maximum connections exceeded"
+
+ // MAX_SUBSCRIPTIONS_ERR is for when the nats server denies a subscription due to the server subscriptions limit
+ MAX_SUBSCRIPTIONS_ERR = "maximum subscriptions exceeded"
)
// Errors
@@ -106,6 +109,7 @@ var (
ErrAuthorization = errors.New("nats: authorization violation")
ErrAuthExpired = errors.New("nats: authentication expired")
ErrAuthRevoked = errors.New("nats: authentication revoked")
+ ErrPermissionViolation = errors.New("nats: permissions violation")
ErrAccountAuthExpired = errors.New("nats: account authentication expired")
ErrNoServers = errors.New("nats: no servers available for connection")
ErrJsonParse = errors.New("nats: connect message, json parse error")
@@ -131,6 +135,7 @@ var (
ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server")
ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION)
ErrTokenAlreadySet = errors.New("nats: token and token handler both set")
+ ErrUserInfoAlreadySet = errors.New("nats: cannot set user info callback and user/pass")
ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection")
ErrMsgNoReply = errors.New("nats: message does not have a reply")
ErrClientIPNotSupported = errors.New("nats: client IP not supported by this server")
@@ -140,6 +145,7 @@ var (
ErrNoResponders = errors.New("nats: no responders available for request")
ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded")
ErrConnectionNotTLS = errors.New("nats: connection is not tls")
+ ErrMaxSubscriptionsExceeded = errors.New("nats: server maximum subscriptions exceeded")
)
// GetDefaultOptions returns default configuration options for the client.
@@ -160,7 +166,7 @@ func GetDefaultOptions() Options {
}
}
-// DEPRECATED: Use GetDefaultOptions() instead.
+// Deprecated: Use GetDefaultOptions() instead.
// DefaultOptions is not safe for use by multiple clients.
// For details see #308.
var DefaultOptions = GetDefaultOptions()
@@ -230,6 +236,9 @@ type SignatureHandler func([]byte) ([]byte, error)
// AuthTokenHandler is used to generate a new token.
type AuthTokenHandler func() string
+// UserInfoCB is used to pass the username and password when establishing connection.
+type UserInfoCB func() (string, string)
+
// ReconnectDelayHandler is used to get from the user the desired
// delay the library should pause before attempting to reconnect
// again. Note that this is invoked after the library tried the
@@ -386,7 +395,7 @@ type Options struct {
// DisconnectedCB sets the disconnected handler that is called
// whenever the connection is disconnected.
// Will not be called if DisconnectedErrCB is set
- // DEPRECATED. Use DisconnectedErrCB which passes error that caused
+ // Deprecated. Use DisconnectedErrCB which passes error that caused
// the disconnect event.
DisconnectedCB ConnHandler
@@ -443,6 +452,9 @@ type Options struct {
// Password sets the password to be used when connecting to a server.
Password string
+ // UserInfo sets the callback handler that will fetch the username and password.
+ UserInfo UserInfoCB
+
// Token sets the token to be used when connecting to a server.
Token string
@@ -450,7 +462,7 @@ type Options struct {
TokenHandler AuthTokenHandler
// Dialer allows a custom net.Dialer when forming connections.
- // DEPRECATED: should use CustomDialer instead.
+ // Deprecated: should use CustomDialer instead.
Dialer *net.Dialer
// CustomDialer allows to specify a custom dialer (not necessarily
@@ -499,6 +511,11 @@ type Options struct {
// SkipHostLookup skips the DNS lookup for the server hostname.
SkipHostLookup bool
+
+ // PermissionErrOnSubscribe - if set to true, the client will return ErrPermissionViolation
+ // from SubscribeSync if the server returns a permissions error for a subscription.
+ // Defaults to false.
+ PermissionErrOnSubscribe bool
}
const (
@@ -607,17 +624,19 @@ type Subscription struct {
// For holding information about a JetStream consumer.
jsi *jsSub
- delivered uint64
- max uint64
- conn *Conn
- mcb MsgHandler
- mch chan *Msg
- closed bool
- sc bool
- connClosed bool
- draining bool
- status SubStatus
- statListeners map[chan SubStatus][]SubStatus
+ delivered uint64
+ max uint64
+ conn *Conn
+ mcb MsgHandler
+ mch chan *Msg
+ errCh chan (error)
+ closed bool
+ sc bool
+ connClosed bool
+ draining bool
+ status SubStatus
+ statListeners map[chan SubStatus][]SubStatus
+ permissionsErr error
// Type of Subscription
typ SubscriptionType
@@ -1108,7 +1127,7 @@ func DisconnectErrHandler(cb ConnErrHandler) Option {
}
// DisconnectHandler is an Option to set the disconnected handler.
-// DEPRECATED: Use DisconnectErrHandler.
+// Deprecated: Use DisconnectErrHandler.
func DisconnectHandler(cb ConnHandler) Option {
return func(o *Options) error {
o.DisconnectedCB = cb
@@ -1166,6 +1185,13 @@ func UserInfo(user, password string) Option {
}
}
+func UserInfoHandler(cb UserInfoCB) Option {
+ return func(o *Options) error {
+ o.UserInfo = cb
+ return nil
+ }
+}
+
// Token is an Option to set the token to use
// when a token is not included directly in the URLs
// and when a token handler is not provided.
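
The new `UserInfoHandler` option added in the hunk above supplies credentials via a callback at (re)connect time; combining it with `UserInfo` now fails with `ErrUserInfoAlreadySet`. A snippet, where `loadCreds` is a hypothetical helper that reads the current username/password from a secret store:

```go
nc, err := nats.Connect("nats://127.0.0.1:4222",
	nats.UserInfoHandler(func() (string, string) {
		return loadCreds() // hypothetical helper returning (user, pass)
	}),
)
if err != nil {
	log.Fatal(err)
}
defer nc.Close()
```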
@@ -1280,7 +1306,7 @@ func SyncQueueLen(max int) Option {
// Dialer is an Option to set the dialer which will be used when
// attempting to establish a connection.
-// DEPRECATED: Should use CustomDialer instead.
+// Deprecated: Should use CustomDialer instead.
func Dialer(dialer *net.Dialer) Option {
return func(o *Options) error {
o.Dialer = dialer
@@ -1359,7 +1385,7 @@ func ProxyPath(path string) Option {
func CustomInboxPrefix(p string) Option {
return func(o *Options) error {
if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") {
- return fmt.Errorf("nats: invalid custom prefix")
+ return errors.New("nats: invalid custom prefix")
}
o.InboxPrefix = p
return nil
@@ -1383,6 +1409,13 @@ func SkipHostLookup() Option {
}
}
+func PermissionErrOnSubscribe(enabled bool) Option {
+ return func(o *Options) error {
+ o.PermissionErrOnSubscribe = enabled
+ return nil
+ }
+}
+
// TLSHandshakeFirst is an Option to perform the TLS handshake first, that is
// before receiving the INFO protocol. This requires the server to also be
// configured with such option, otherwise the connection will fail.
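
And a snippet for `PermissionErrOnSubscribe`: per the option's doc comment above, a server-side permissions denial should surface from the synchronous subscription as `ErrPermissionViolation` rather than only via the async error callback. The URL and subject are illustrative:

```go
nc, err := nats.Connect("nats://127.0.0.1:4222", nats.PermissionErrOnSubscribe(true))
if err != nil {
	log.Fatal(err)
}
defer nc.Close()

sub, err := nc.SubscribeSync("restricted.subject")
if err != nil {
	log.Fatal(err)
}
if _, err := sub.NextMsg(time.Second); errors.Is(err, nats.ErrPermissionViolation) {
	log.Printf("subscription denied: %v", err)
}
```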
@@ -1397,7 +1430,7 @@ func TLSHandshakeFirst() Option {
// Handler processing
// SetDisconnectHandler will set the disconnect event handler.
-// DEPRECATED: Use SetDisconnectErrHandler
+// Deprecated: Use SetDisconnectErrHandler
func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) {
if nc == nil {
return
@@ -1513,7 +1546,7 @@ func processUrlString(url string) []string {
urls := strings.Split(url, ",")
var j int
for _, s := range urls {
- u := strings.TrimSpace(s)
+ u := strings.TrimSuffix(strings.TrimSpace(s), "/")
if len(u) > 0 {
urls[j] = u
j++
@@ -1814,7 +1847,7 @@ func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error {
if len(nc.srvPool) == 0 {
nc.ws = isWS
} else if isWS && !nc.ws || !isWS && nc.ws {
- return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed")
+ return errors.New("mixing of websocket and non websocket URLs is not allowed")
}
var tlsName string
@@ -2563,6 +2596,13 @@ func (nc *Conn) connectProto() (string, error) {
pass = o.Password
token = o.Token
nkey = o.Nkey
+
+ if nc.Opts.UserInfo != nil {
+ if user != _EMPTY_ || pass != _EMPTY_ {
+ return _EMPTY_, ErrUserInfoAlreadySet
+ }
+ user, pass = nc.Opts.UserInfo()
+ }
}
// Look for user jwt.
@@ -2952,11 +2992,11 @@ func (nc *Conn) doReconnect(err error, forceReconnect bool) {
// processOpErr handles errors from reading or parsing the protocol.
// The lock should not be held entering this function.
-func (nc *Conn) processOpErr(err error) {
+func (nc *Conn) processOpErr(err error) bool {
nc.mu.Lock()
+ defer nc.mu.Unlock()
if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() {
- nc.mu.Unlock()
- return
+ return false
}
if nc.Opts.AllowReconnect && nc.status == CONNECTED {
@@ -2976,14 +3016,12 @@ func (nc *Conn) processOpErr(err error) {
nc.clearPendingFlushCalls()
go nc.doReconnect(err, false)
- nc.mu.Unlock()
- return
+ return false
}
nc.changeConnStatus(DISCONNECTED)
nc.err = err
- nc.mu.Unlock()
- nc.close(CLOSED, true, nil)
+ return true
}
// dispatch is responsible for calling any async callbacks
@@ -3080,7 +3118,9 @@ func (nc *Conn) readLoop() {
err = nc.parse(buf)
}
if err != nil {
- nc.processOpErr(err)
+ if shouldClose := nc.processOpErr(err); shouldClose {
+ nc.close(CLOSED, true, nil)
+ }
break
}
}
@@ -3410,15 +3450,41 @@ slowConsumer:
}
}
-// processPermissionsViolation is called when the server signals a subject
-// permissions violation on either publish or subscribe.
-func (nc *Conn) processPermissionsViolation(err string) {
+var permissionsRe = regexp.MustCompile(`Subscription to "(\S+)"`)
+var permissionsQueueRe = regexp.MustCompile(`using queue "(\S+)"`)
+
+// processTransientError is called when the server signals a non terminal error
+// which does not close the connection or trigger a reconnect.
+// This will trigger the async error callback if set.
+// These errors include the following:
+// - permissions violation on publish or subscribe
+// - maximum subscriptions exceeded
+func (nc *Conn) processTransientError(err error) {
nc.mu.Lock()
- // create error here so we can pass it as a closure to the async cb dispatcher.
- e := errors.New("nats: " + err)
- nc.err = e
+ nc.err = err
+ if errors.Is(err, ErrPermissionViolation) {
+ matches := permissionsRe.FindStringSubmatch(err.Error())
+ if len(matches) >= 2 {
+ queueMatches := permissionsQueueRe.FindStringSubmatch(err.Error())
+ var q string
+ if len(queueMatches) >= 2 {
+ q = queueMatches[1]
+ }
+ subject := matches[1]
+ for _, sub := range nc.subs {
+ if sub.Subject == subject && sub.Queue == q && sub.permissionsErr == nil {
+ sub.mu.Lock()
+ if sub.errCh != nil {
+ sub.errCh <- err
+ }
+ sub.permissionsErr = err
+ sub.mu.Unlock()
+ }
+ }
+ }
+ }
if nc.Opts.AsyncErrorCB != nil {
- nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) })
+ nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
}
nc.mu.Unlock()
}
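processTransientError routes permissions violations and the new maximum-subscriptions error to the async error callback without dropping the connection, and both are typed errors that can be matched with errors.Is. A sketch of a connection-level handler (server URL is a placeholder):

```go
package main

import (
	"errors"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect("nats://127.0.0.1:4222",
		nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {
			switch {
			case errors.Is(err, nats.ErrPermissionViolation):
				log.Printf("permissions violation: %v", err)
			case errors.Is(err, nats.ErrMaxSubscriptionsExceeded):
				log.Printf("server subscription limit reached: %v", err)
			default:
				log.Printf("async error: %v", err)
			}
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
}
```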
@@ -3650,15 +3716,17 @@ func (nc *Conn) processErr(ie string) {
// convert to lower case.
e := strings.ToLower(ne)
- close := false
+ var close bool
// FIXME(dlc) - process Slow Consumer signals special.
if e == STALE_CONNECTION {
- nc.processOpErr(ErrStaleConnection)
+ close = nc.processOpErr(ErrStaleConnection)
} else if e == MAX_CONNECTIONS_ERR {
- nc.processOpErr(ErrMaxConnectionsExceeded)
+ close = nc.processOpErr(ErrMaxConnectionsExceeded)
} else if strings.HasPrefix(e, PERMISSIONS_ERR) {
- nc.processPermissionsViolation(ne)
+ nc.processTransientError(fmt.Errorf("%w: %s", ErrPermissionViolation, ne))
+ } else if strings.HasPrefix(e, MAX_SUBSCRIPTIONS_ERR) {
+ nc.processTransientError(ErrMaxSubscriptionsExceeded)
} else if authErr := checkAuthError(e); authErr != nil {
nc.mu.Lock()
close = nc.processAuthError(authErr)
@@ -3999,10 +4067,6 @@ func (nc *Conn) respHandler(m *Msg) {
// Helper to setup and send new request style requests. Return the chan to receive the response.
func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) {
nc.mu.Lock()
- // Do setup for the new style if needed.
- if nc.respMap == nil {
- nc.initNewResp()
- }
// Create new literal Inbox and map to a chan msg.
mch := make(chan *Msg, RequestChanLen)
respInbox := nc.newRespInbox()
@@ -4013,7 +4077,7 @@ func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Ms
// Create the response subscription we will use for all new style responses.
// This will be on an _INBOX with an additional terminal token. The subscription
// will be on a wildcard.
- s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil)
+ s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, nil, false, nil)
if err != nil {
nc.mu.Unlock()
return nil, token, err
@@ -4111,7 +4175,7 @@ func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration)
inbox := nc.NewInbox()
ch := make(chan *Msg, RequestChanLen)
- s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
+ s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, nil, true, nil)
if err != nil {
return nil, err
}
@@ -4217,14 +4281,14 @@ func (nc *Conn) respToken(respInbox string) string {
// since it can't match more than one token.
// Messages will be delivered to the associated MsgHandler.
func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) {
- return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil)
+ return nc.subscribe(subj, _EMPTY_, cb, nil, nil, false, nil)
}
// ChanSubscribe will express interest in the given subject and place
// all messages received on the channel.
// You should not close the channel until sub.Unsubscribe() has been called.
func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) {
- return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil)
+ return nc.subscribe(subj, _EMPTY_, nil, ch, nil, false, nil)
}
// ChanQueueSubscribe will express interest in the given subject.
@@ -4234,7 +4298,7 @@ func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error)
// You should not close the channel until sub.Unsubscribe() has been called.
// Note: This is the same than QueueSubscribeSyncWithChan.
func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) {
- return nc.subscribe(subj, group, nil, ch, false, nil)
+ return nc.subscribe(subj, group, nil, ch, nil, false, nil)
}
// SubscribeSync will express interest on the given subject. Messages will
@@ -4244,7 +4308,11 @@ func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
return nil, ErrInvalidConnection
}
mch := make(chan *Msg, nc.Opts.SubChanLen)
- return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil)
+ var errCh chan error
+ if nc.Opts.PermissionErrOnSubscribe {
+ errCh = make(chan error, 100)
+ }
+ return nc.subscribe(subj, _EMPTY_, nil, mch, errCh, true, nil)
}
// QueueSubscribe creates an asynchronous queue subscriber on the given subject.
@@ -4252,7 +4320,7 @@ func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
// only one member of the group will be selected to receive any given
// message asynchronously.
func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) {
- return nc.subscribe(subj, queue, cb, nil, false, nil)
+ return nc.subscribe(subj, queue, cb, nil, nil, false, nil)
}
// QueueSubscribeSync creates a synchronous queue subscriber on the given
@@ -4261,7 +4329,11 @@ func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription
// given message synchronously using Subscription.NextMsg().
func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
mch := make(chan *Msg, nc.Opts.SubChanLen)
- return nc.subscribe(subj, queue, nil, mch, true, nil)
+ var errCh chan error
+ if nc.Opts.PermissionErrOnSubscribe {
+ errCh = make(chan error, 100)
+ }
+ return nc.subscribe(subj, queue, nil, mch, errCh, true, nil)
}
// QueueSubscribeSyncWithChan will express interest in the given subject.
@@ -4271,7 +4343,7 @@ func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
// You should not close the channel until sub.Unsubscribe() has been called.
// Note: This is the same than ChanQueueSubscribe.
func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) {
- return nc.subscribe(subj, queue, nil, ch, false, nil)
+ return nc.subscribe(subj, queue, nil, ch, nil, false, nil)
}
// badSubject will do quick test on whether a subject is acceptable.
@@ -4295,16 +4367,16 @@ func badQueue(qname string) bool {
}
// subscribe is the internal subscribe function that indicates interest in a subject.
-func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, errCh chan (error), isSync bool, js *jsSub) (*Subscription, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
nc.mu.Lock()
defer nc.mu.Unlock()
- return nc.subscribeLocked(subj, queue, cb, ch, isSync, js)
+ return nc.subscribeLocked(subj, queue, cb, ch, errCh, isSync, js)
}
-func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, errCh chan (error), isSync bool, js *jsSub) (*Subscription, error) {
if nc == nil {
return nil, ErrInvalidConnection
}
@@ -4355,6 +4427,7 @@ func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg,
} else { // Sync Subscription
sub.typ = SyncSubscription
sub.mch = ch
+ sub.errCh = errCh
}
nc.subsMu.Lock()
@@ -4612,14 +4685,14 @@ func (s *Subscription) Unsubscribe() error {
// checkDrained will watch for a subscription to be fully drained
// and then remove it.
func (nc *Conn) checkDrained(sub *Subscription) {
+ if nc == nil || sub == nil {
+ return
+ }
defer func() {
sub.mu.Lock()
defer sub.mu.Unlock()
sub.draining = false
}()
- if nc == nil || sub == nil {
- return
- }
// This allows us to know that whatever we have in the client pending
// is correct and the server will not send additional information.
@@ -4799,16 +4872,92 @@ func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) {
t := globalTimerPool.Get(timeout)
defer globalTimerPool.Put(t)
+ if s.errCh != nil {
+ select {
+ case msg, ok = <-mch:
+ if !ok {
+ return nil, s.getNextMsgErr()
+ }
+ if err := s.processNextMsgDelivered(msg); err != nil {
+ return nil, err
+ }
+ case err := <-s.errCh:
+ return nil, err
+ case <-t.C:
+ return nil, ErrTimeout
+ }
+ } else {
+ select {
+ case msg, ok = <-mch:
+ if !ok {
+ return nil, s.getNextMsgErr()
+ }
+ if err := s.processNextMsgDelivered(msg); err != nil {
+ return nil, err
+ }
+ case <-t.C:
+ return nil, ErrTimeout
+ }
+ }
+
+ return msg, nil
+}
+
+// nextMsgNoTimeout works similarly to Subscription.NextMsg() but will not
+// time out. It is only used internally for non-timeout subscription iterator.
+func (s *Subscription) nextMsgNoTimeout() (*Msg, error) {
+ if s == nil {
+ return nil, ErrBadSubscription
+ }
+
+ s.mu.Lock()
+ err := s.validateNextMsgState(false)
+ if err != nil {
+ s.mu.Unlock()
+ return nil, err
+ }
+
+ // snapshot
+ mch := s.mch
+ s.mu.Unlock()
+
+ var ok bool
+ var msg *Msg
+
+ // If something is available right away, let's optimize that case.
select {
case msg, ok = <-mch:
+ if !ok {
+ return nil, s.getNextMsgErr()
+ }
+ if err := s.processNextMsgDelivered(msg); err != nil {
+ return nil, err
+ } else {
+ return msg, nil
+ }
+ default:
+ }
+
+ if s.errCh != nil {
+ select {
+ case msg, ok = <-mch:
+ if !ok {
+ return nil, s.getNextMsgErr()
+ }
+ if err := s.processNextMsgDelivered(msg); err != nil {
+ return nil, err
+ }
+ case err := <-s.errCh:
+ return nil, err
+ }
+ } else {
+ msg, ok = <-mch
if !ok {
return nil, s.getNextMsgErr()
}
if err := s.processNextMsgDelivered(msg); err != nil {
return nil, err
}
- case <-t.C:
- return nil, ErrTimeout
}
return msg, nil
@@ -4831,6 +4980,12 @@ func (s *Subscription) validateNextMsgState(pullSubInternal bool) error {
if s.mcb != nil {
return ErrSyncSubRequired
}
+ // if this subscription previously had a permissions error
+ // and no reconnect has been attempted, return the permissions error
+ // since the subscription does not exist on the server
+ if s.conn.Opts.PermissionErrOnSubscribe && s.permissionsErr != nil {
+ return s.permissionsErr
+ }
if s.sc {
s.changeSubStatus(SubscriptionActive)
s.sc = false
@@ -4902,7 +5057,8 @@ func (s *Subscription) processNextMsgDelivered(msg *Msg) error {
}
// Queued returns the number of queued messages in the client for this subscription.
-// DEPRECATED: Use Pending()
+//
+// Deprecated: Use Pending()
func (s *Subscription) QueuedMsgs() (int, error) {
m, _, err := s.Pending()
return int(m), err
@@ -5106,7 +5262,9 @@ func (nc *Conn) processPingTimer() {
nc.pout++
if nc.pout > nc.Opts.MaxPingsOut {
nc.mu.Unlock()
- nc.processOpErr(ErrStaleConnection)
+ if shouldClose := nc.processOpErr(ErrStaleConnection); shouldClose {
+ nc.close(CLOSED, true, nil)
+ }
return
}
@@ -5203,6 +5361,9 @@ func (nc *Conn) resendSubscriptions() {
for _, s := range subs {
adjustedMax := uint64(0)
s.mu.Lock()
+ // when resending subscriptions, the permissions error should be cleared
+ // since the user may have fixed the permissions issue
+ s.permissionsErr = nil
if s.max > 0 {
if s.delivered < s.max {
adjustedMax = s.max - s.delivered
@@ -5241,9 +5402,6 @@ func (nc *Conn) clearPendingFlushCalls() {
// This will clear any pending Request calls.
// Lock is assumed to be held by the caller.
func (nc *Conn) clearPendingRequestCalls() {
- if nc.respMap == nil {
- return
- }
for key, ch := range nc.respMap {
if ch != nil {
close(ch)
@@ -5791,7 +5949,7 @@ func NkeyOptionFromSeed(seedFile string) (Option, error) {
return nil, err
}
if !nkeys.IsValidPublicUserKey(pub) {
- return nil, fmt.Errorf("nats: Not a valid nkey user seed")
+ return nil, errors.New("nats: Not a valid nkey user seed")
}
sigCB := func(nonce []byte) ([]byte, error) {
return sigHandler(nonce, seedFile)
diff --git a/vendor/github.com/nats-io/nats.go/nats_iter.go b/vendor/github.com/nats-io/nats.go/nats_iter.go
new file mode 100644
index 000000000..05234d5b0
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/nats_iter.go
@@ -0,0 +1,72 @@
+// Copyright 2012-2024 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.23
+
+package nats
+
+import (
+ "errors"
+ "iter"
+ "time"
+)
+
+// Msgs returns an iter.Seq2[*Msg, error] that can be used to iterate over
+// messages. It can only be used with a subscription that has been created with
+// SubscribeSync or QueueSubscribeSync, otherwise it will return an error on the
+// first iteration.
+//
+// The iterator will block until a message is available. The
+// subscription will not be closed when the iterator is done.
+func (sub *Subscription) Msgs() iter.Seq2[*Msg, error] {
+ return func(yield func(*Msg, error) bool) {
+ for {
+ msg, err := sub.nextMsgNoTimeout()
+ if err != nil {
+ yield(nil, err)
+ return
+ }
+ if !yield(msg, nil) {
+ return
+ }
+
+ }
+ }
+}
+
+// MsgsTimeout returns an iter.Seq2[*Msg, error] that can be used to iterate
+// over messages. It can only be used with a subscription that has been created
+// with SubscribeSync or QueueSubscribeSync, otherwise it will return an error
+// on the first iteration.
+//
+// The iterator will block until a message is available or the timeout is
+// reached. If the timeout is reached, the iterator will return nats.ErrTimeout
+// but it will not be closed.
+func (sub *Subscription) MsgsTimeout(timeout time.Duration) iter.Seq2[*Msg, error] {
+ return func(yield func(*Msg, error) bool) {
+ for {
+ msg, err := sub.NextMsg(timeout)
+ if err != nil {
+ if !yield(nil, err) {
+ return
+ }
+ if !errors.Is(err, ErrTimeout) {
+ return
+ }
+ }
+ if !yield(msg, nil) {
+ return
+ }
+ }
+ }
+}
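The new Go 1.23 iterator support pairs with SubscribeSync/QueueSubscribeSync. A sketch with a placeholder subject; MsgsTimeout works the same way but yields nats.ErrTimeout per attempt instead of blocking indefinitely:

```go
//go:build go1.23

package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect("nats://127.0.0.1:4222")
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	sub, err := nc.SubscribeSync("events.>") // placeholder subject
	if err != nil {
		log.Fatal(err)
	}

	// Blocks per message; breaking out of the loop leaves the
	// subscription open.
	for msg, err := range sub.Msgs() {
		if err != nil {
			log.Printf("iterator stopped: %v", err)
			break
		}
		log.Printf("received %q", msg.Subject)
	}
}
```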
diff --git a/vendor/github.com/nats-io/nats.go/netchan.go b/vendor/github.com/nats-io/nats.go/netchan.go
index 6b13690b4..35d92140c 100644
--- a/vendor/github.com/nats-io/nats.go/netchan.go
+++ b/vendor/github.com/nats-io/nats.go/netchan.go
@@ -23,6 +23,8 @@ import (
// Data will be encoded and decoded via the EncodedConn and its associated encoders.
// BindSendChan binds a channel for send operations to NATS.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindSendChan(subject string, channel any) error {
chVal := reflect.ValueOf(channel)
if chVal.Kind() != reflect.Chan {
@@ -61,11 +63,15 @@ func chPublish(c *EncodedConn, chVal reflect.Value, subject string) {
}
// BindRecvChan binds a channel for receive operations from NATS.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindRecvChan(subject string, channel any) (*Subscription, error) {
return c.bindRecvChan(subject, _EMPTY_, channel)
}
// BindRecvQueueChan binds a channel for queue-based receive operations from NATS.
+//
+// Deprecated: Encoded connections are no longer supported.
func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel any) (*Subscription, error) {
return c.bindRecvChan(subject, queue, channel)
}
@@ -107,5 +113,5 @@ func (c *EncodedConn) bindRecvChan(subject, queue string, channel any) (*Subscri
chVal.Send(oPtr)
}
- return c.Conn.subscribe(subject, queue, cb, nil, false, nil)
+ return c.Conn.subscribe(subject, queue, cb, nil, nil, false, nil)
}
diff --git a/vendor/github.com/nats-io/nats.go/testing_internal.go b/vendor/github.com/nats-io/nats.go/testing_internal.go
index 18397026a..78ca3b126 100644
--- a/vendor/github.com/nats-io/nats.go/testing_internal.go
+++ b/vendor/github.com/nats-io/nats.go/testing_internal.go
@@ -12,7 +12,6 @@
// limitations under the License.
//go:build internal_testing
-// +build internal_testing
// Functions in this file are only available when building nats.go with the
// internal_testing build tag. They are used by the nats.go test suite.
diff --git a/vendor/github.com/nats-io/nats.go/util/tls.go b/vendor/github.com/nats-io/nats.go/util/tls.go
index af9f51f05..ae16c3d69 100644
--- a/vendor/github.com/nats-io/nats.go/util/tls.go
+++ b/vendor/github.com/nats-io/nats.go/util/tls.go
@@ -11,9 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build go1.8
-// +build go1.8
-
package util
import "crypto/tls"
diff --git a/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/vendor/github.com/nats-io/nats.go/util/tls_go17.go
deleted file mode 100644
index 44d46b42d..000000000
--- a/vendor/github.com/nats-io/nats.go/util/tls_go17.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2016-2022 The NATS Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.7 && !go1.8
-// +build go1.7,!go1.8
-
-package util
-
-import (
- "crypto/tls"
-)
-
-// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
-// This is temporary, until this is provided by the language.
-// https://go-review.googlesource.com/#/c/28075/
-func CloneTLSConfig(c *tls.Config) *tls.Config {
- return &tls.Config{
- Rand: c.Rand,
- Time: c.Time,
- Certificates: c.Certificates,
- NameToCertificate: c.NameToCertificate,
- GetCertificate: c.GetCertificate,
- RootCAs: c.RootCAs,
- NextProtos: c.NextProtos,
- ServerName: c.ServerName,
- ClientAuth: c.ClientAuth,
- ClientCAs: c.ClientCAs,
- InsecureSkipVerify: c.InsecureSkipVerify,
- CipherSuites: c.CipherSuites,
- PreferServerCipherSuites: c.PreferServerCipherSuites,
- SessionTicketsDisabled: c.SessionTicketsDisabled,
- SessionTicketKey: c.SessionTicketKey,
- ClientSessionCache: c.ClientSessionCache,
- MinVersion: c.MinVersion,
- MaxVersion: c.MaxVersion,
- CurvePreferences: c.CurvePreferences,
- DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
- Renegotiation: c.Renegotiation,
- }
-}
diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go
index 2c2d421a8..46d8300de 100644
--- a/vendor/github.com/nats-io/nats.go/ws.go
+++ b/vendor/github.com/nats-io/nats.go/ws.go
@@ -76,14 +76,15 @@ var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff}
type websocketReader struct {
- r io.Reader
- pending [][]byte
- ib []byte
- ff bool
- fc bool
- nl bool
- dc *wsDecompressor
- nc *Conn
+ r io.Reader
+ pending [][]byte
+ compress bool
+ ib []byte
+ ff bool
+ fc bool
+ nl bool
+ dc *wsDecompressor
+ nc *Conn
}
type wsDecompressor struct {
@@ -237,8 +238,8 @@ func (r *websocketReader) Read(p []byte) (int, error) {
case wsPingMessage, wsPongMessage, wsCloseMessage:
if rem > wsMaxControlPayloadSize {
return 0, fmt.Errorf(
- fmt.Sprintf("control frame length bigger than maximum allowed of %v bytes",
- wsMaxControlPayloadSize))
+ "control frame length bigger than maximum allowed of %v bytes",
+ wsMaxControlPayloadSize)
}
if compressed {
return 0, errors.New("control frame should not be compressed")
@@ -312,6 +313,8 @@ func (r *websocketReader) Read(p []byte) (int, error) {
}
r.fc = false
}
+ } else if r.compress {
+ b = bytes.Clone(b)
}
// Add to the pending list if dealing with uncompressed frames or
// after we have received the full compressed message and decompressed it.
@@ -622,7 +625,7 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error {
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) {
- err = fmt.Errorf("invalid websocket connection")
+ err = errors.New("invalid websocket connection")
}
// Check compression extension...
if err == nil && compress {
@@ -634,7 +637,7 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error {
if !srvCompress {
compress = false
} else if !noCtxTakeover {
- err = fmt.Errorf("compression negotiation error")
+ err = errors.New("compression negotiation error")
}
}
if resp != nil {
@@ -647,6 +650,7 @@ func (nc *Conn) wsInitHandshake(u *url.URL) error {
wsr := wsNewReader(nc.br.r)
wsr.nc = nc
+ wsr.compress = compress
// We have to slurp whatever is in the bufio reader and copy to br.r
if n := br.Buffered(); n != 0 {
wsr.ib, _ = br.Peek(n)
diff --git a/vendor/github.com/nats-io/nkeys/keypair.go b/vendor/github.com/nats-io/nkeys/keypair.go
index 9d0551806..69ebe21f7 100644
--- a/vendor/github.com/nats-io/nkeys/keypair.go
+++ b/vendor/github.com/nats-io/nkeys/keypair.go
@@ -1,4 +1,4 @@
-// Copyright 2018-2022 The NATS Authors
+// Copyright 2018-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -15,10 +15,9 @@ package nkeys
import (
"bytes"
+ "crypto/ed25519"
"crypto/rand"
"io"
-
- "golang.org/x/crypto/ed25519"
)
// kp is the internal struct for a kepypair using seed.
@@ -31,7 +30,7 @@ const seedLen = 32
// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte.
func CreatePair(prefix PrefixByte) (KeyPair, error) {
- return CreatePairWithRand(prefix, rand.Reader)
+ return CreatePairWithRand(prefix, nil)
}
// CreatePair will create a KeyPair based on the rand reader and a type/prefix byte. rand can be nil.
@@ -39,17 +38,12 @@ func CreatePairWithRand(prefix PrefixByte, rr io.Reader) (KeyPair, error) {
if prefix == PrefixByteCurve {
return CreateCurveKeysWithRand(rr)
}
- if rr == nil {
- rr = rand.Reader
- }
- var rawSeed [seedLen]byte
-
- _, err := io.ReadFull(rr, rawSeed[:])
+ _, priv, err := ed25519.GenerateKey(rr)
if err != nil {
return nil, err
}
- seed, err := EncodeSeed(prefix, rawSeed[:])
+ seed, err := EncodeSeed(prefix, priv.Seed())
if err != nil {
return nil, err
}
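Seed generation now goes through crypto/ed25519 (a nil reader falls back to crypto/rand inside ed25519.GenerateKey), so existing callers of the nkeys API are unaffected. A sketch using the package's CreateUser helper:

```go
package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nkeys"
)

func main() {
	// Equivalent to CreatePairWithRand(nkeys.PrefixByteUser, nil); entropy
	// now comes from ed25519.GenerateKey backed by crypto/rand.
	kp, err := nkeys.CreateUser()
	if err != nil {
		log.Fatal(err)
	}
	defer kp.Wipe()

	pub, err := kp.PublicKey()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("user public key:", pub)
}
```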
diff --git a/vendor/github.com/nats-io/nkeys/public.go b/vendor/github.com/nats-io/nkeys/public.go
index c3cd21edb..a6e88c9ba 100644
--- a/vendor/github.com/nats-io/nkeys/public.go
+++ b/vendor/github.com/nats-io/nkeys/public.go
@@ -1,4 +1,4 @@
-// Copyright 2018 The NATS Authors
+// Copyright 2018-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -14,10 +14,9 @@
package nkeys
import (
+ "crypto/ed25519"
"crypto/rand"
"io"
-
- "golang.org/x/crypto/ed25519"
)
// A KeyPair from a public key capable of verifying only.
diff --git a/vendor/github.com/nats-io/nkeys/xkeys.go b/vendor/github.com/nats-io/nkeys/xkeys.go
index 78f8b99e1..7951fb713 100644
--- a/vendor/github.com/nats-io/nkeys/xkeys.go
+++ b/vendor/github.com/nats-io/nkeys/xkeys.go
@@ -1,4 +1,4 @@
-// Copyright 2022-2023 The NATS Authors
+// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -15,6 +15,7 @@ package nkeys
import (
"bytes"
+ "crypto/ed25519"
"crypto/rand"
"encoding/binary"
"io"
@@ -40,17 +41,18 @@ type ckp struct {
// CreateCurveKeys will create a Curve typed KeyPair.
func CreateCurveKeys() (KeyPair, error) {
- return CreateCurveKeysWithRand(rand.Reader)
+ return CreateCurveKeysWithRand(nil)
}
// CreateCurveKeysWithRand will create a Curve typed KeyPair
// with specified rand source.
func CreateCurveKeysWithRand(rr io.Reader) (KeyPair, error) {
var kp ckp
- _, err := io.ReadFull(rr, kp.seed[:])
+ _, priv, err := ed25519.GenerateKey(rr)
if err != nil {
return nil, err
}
+ kp.seed = [curveKeyLen]byte(priv.Seed())
return &kp, nil
}
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index cee360db7..2f1549075 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -483,6 +483,8 @@ type Histogram struct {
// histograms.
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
}
func (x *Histogram) Reset() {
@@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 {
return nil
}
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
@@ -923,6 +932,7 @@ type MetricFamily struct {
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
}
func (x *MetricFamily) Reset() {
@@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric {
return nil
}
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
+}
+
var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
@@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
@@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
- 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
- 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
- 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
- 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
- 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
- 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
- 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
- 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
- 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
- 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
- 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e,
- 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11,
- 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
- 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
- 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12,
- 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75,
- 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
- 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
- 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75,
- 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69,
- 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74,
- 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
- 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
- 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48,
- 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67,
- 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69,
- 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68,
- 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12,
- 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
- 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
- 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
- 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d,
- 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55,
- 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
- 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b,
- 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48,
- 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41,
- 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42,
- 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
- 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69,
- 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
}
var (
@@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
- 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
- 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
- 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
- 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
- 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
- 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
- 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
- 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
- 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
- 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
- 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
- 19, // [19:19] is the sub-list for method output_type
- 19, // [19:19] is the sub-list for method input_type
- 19, // [19:19] is the sub-list for extension type_name
- 19, // [19:19] is the sub-list for extension extendee
- 0, // [0:19] is the sub-list for field type_name
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}
func init() { file_io_prometheus_client_metrics_proto_init() }
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 0ca86a3dc..a909b171c 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
package expfmt
import (
+ "bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@ import (
"net/http"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
- "github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
"github.com/prometheus/common/model"
)
@@ -86,8 +87,10 @@ type protoDecoder struct {
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
+ opts := protodelim.UnmarshalOptions{
+ MaxSize: -1,
+ }
+ if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index ca2140600..02b7a5e81 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,10 +18,11 @@ import (
"io"
"net/http"
- "github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+ "google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
dto "github.com/prometheus/client_model/go"
)
@@ -120,7 +121,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case FmtProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
+ _, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
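expfmt now delegates delimited protobuf framing to google.golang.org/protobuf/encoding/protodelim in place of the matttproud/golang_protobuf_extensions pbutil helpers; the Encoder/Decoder API is unchanged. A round-trip sketch with a placeholder metric family:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_total"), // placeholder metric name
		Type: dto.MetricType_COUNTER.Enum(),
	}

	var buf bytes.Buffer
	enc := expfmt.NewEncoder(&buf, expfmt.FmtProtoDelim) // protodelim.MarshalTo under the hood
	if err := enc.Encode(mf); err != nil {
		log.Fatal(err)
	}

	dec := expfmt.NewDecoder(&buf, expfmt.FmtProtoDelim) // protodelim.UnmarshalOptions under the hood
	var out dto.MetricFamily
	if err := dec.Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName())
}
```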
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 35db1cc9d..26490211a 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,6 +16,7 @@ package expfmt
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"math"
@@ -24,8 +25,9 @@ import (
dto "github.com/prometheus/client_model/go"
- "github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
+
+ "github.com/prometheus/common/model"
)
// A stateFn is a function that represents a state in a state machine. By
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
- if p.err == io.EOF {
+ if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
// which is not an error but the signal that we are done.
// Any other error that happens to align with the start of
// a line is still an error.
- if p.err == io.EOF {
+ if errors.Is(p.err, io.EOF) {
p.err = nil
}
return nil
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c7a..178fdbaf6 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -90,13 +90,13 @@ func (a *Alert) Validate() error {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
+ return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
+ return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}
diff --git a/vendor/github.com/nats-io/nats.go/rand.go b/vendor/github.com/prometheus/common/model/metadata.go
similarity index 50%
rename from vendor/github.com/nats-io/nats.go/rand.go
rename to vendor/github.com/prometheus/common/model/metadata.go
index 0cdee0acd..447ab8ad6 100644
--- a/vendor/github.com/nats-io/nats.go/rand.go
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The NATS Authors
+// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,19 +11,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build !go1.20
-// +build !go1.20
+package model
-// A Go client for the NATS messaging system (https://nats.io).
-package nats
+// MetricType represents metric type values.
+type MetricType string
-import (
- "math/rand"
- "time"
+const (
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
)
-
-func init() {
- // This is not needed since Go 1.20 because now rand.Seed always happens
- // by default (uses runtime.fastrand64 instead as source).
- rand.Seed(time.Now().UnixNano())
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7fe..f8c5eabaa 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -20,12 +20,10 @@ import (
"strings"
)
-var (
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-)
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13c6..dc8a0026c 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889d2..910b0b71f 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
+ return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index 9eb440413..8050637d8 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -21,14 +21,12 @@ import (
"strings"
)
-var (
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
- // of 0, which is possible to appear in a real Sample and thus not suitable
- // to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
// Sample is a sample pair associated with a metric. A single sample must either
// define Value or Histogram but not both. Histogram == nil implies the Value
@@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
value, err := strconv.ParseFloat(f, 64)
if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
+ return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index 0f615a705..ae35cc2ab 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -20,14 +20,12 @@ import (
"strconv"
)
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-)
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
// A SampleValue is a representation of a value for a given sample at a given
// time.
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d3e..e00f3b365 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
+* Paul Gier @pgier
+* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 062a28185..0e9ace29b 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -49,23 +49,23 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v1.56.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
ifneq (,$(SKIP_GOLANGCI_LINT))
@@ -169,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 28783e2dd..cdcc8a7cc 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
- return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+ return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
- return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+ return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
- return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
+ return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
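
The switch from %s to %w for ErrFileRead/ErrFileParse (and the second %w, which requires Go 1.20+) is what lets callers match sentinel errors again. A minimal sketch, using only the procfs API shown in this diff plus the standard library:

package main

import (
	"errors"
	"fmt"
	"io/fs"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println("procfs not available:", err)
		return
	}
	if _, err := pfs.GatherARPEntries(); err != nil {
		// Because the message now wraps with %w twice, both the procfs sentinel
		// and the underlying OS error remain matchable with errors.Is.
		fmt.Println("read failure:", errors.Is(err, procfs.ErrFileRead))
		fmt.Println("file missing:", errors.Is(err, fs.ErrNotExist))
	}
}
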
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index 4a173636c..838075009 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
+ node := strings.TrimSuffix(parts[1], ",")
+ zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
- return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
+ return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index f4f5501c6..f0950bb49 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
@@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, errors.New("invalid cpuinfo file: " + firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index 9a73e2639..5f2a37a78 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
+ return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
- return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index f560a8db3..cf2e3eaa0 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
+ return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
- return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
+ return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 5a145bbfe..bc3a20c93 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
- return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
+ return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
- return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
+ return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0,
- fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
+ fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 59465c5bb..332e76c17 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
+ return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index fdd4b9544..67a9d2b44 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -23,7 +23,7 @@ import (
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
- recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
+ // Number of blocks on the device that need to be synced.
+ BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
- return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
+ return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
+ return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
+ blocksSynced := size
+ blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
+ blocksSynced = 0
} else {
- syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+ blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
+ return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
- BlocksSynced: syncedBlocks,
+ BlocksSynced: blocksSynced,
+ BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
- return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, down, size, nil
}
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ blocks := strings.Split(matches[1], "/")
+ blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+ }
+
+ blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+ if err != nil {
+ return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
- return syncedBlocks, pct, finish, speed, nil
+ return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
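
A small consumer-side sketch of the new BlocksToBeSynced field parsed from the "(x/y)" recovery token; it assumes procfs.NewFS and the MDStat.Name field behave as in upstream procfs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	stats, err := pfs.MDStat()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, md := range stats {
		// BlocksToBeSynced is the new denominator from the recovery line;
		// when no resync is running it equals the device size.
		fmt.Printf("%s: %d/%d blocks synced (%.1f%%)\n",
			md.Name, md.BlocksSynced, md.BlocksToBeSynced, md.BlocksSyncedPct)
	}
}
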
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index eaf00e224..4b2c4050a 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
+ Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
+
+ // The struct fields below are the byte-normalized counterparts to the
+ // existing struct fields. Values are normalized using the optional
+ // unit field in the meminfo line.
+ MemTotalBytes *uint64
+ MemFreeBytes *uint64
+ MemAvailableBytes *uint64
+ BuffersBytes *uint64
+ CachedBytes *uint64
+ SwapCachedBytes *uint64
+ ActiveBytes *uint64
+ InactiveBytes *uint64
+ ActiveAnonBytes *uint64
+ InactiveAnonBytes *uint64
+ ActiveFileBytes *uint64
+ InactiveFileBytes *uint64
+ UnevictableBytes *uint64
+ MlockedBytes *uint64
+ SwapTotalBytes *uint64
+ SwapFreeBytes *uint64
+ DirtyBytes *uint64
+ WritebackBytes *uint64
+ AnonPagesBytes *uint64
+ MappedBytes *uint64
+ ShmemBytes *uint64
+ SlabBytes *uint64
+ SReclaimableBytes *uint64
+ SUnreclaimBytes *uint64
+ KernelStackBytes *uint64
+ PageTablesBytes *uint64
+ NFSUnstableBytes *uint64
+ BounceBytes *uint64
+ WritebackTmpBytes *uint64
+ CommitLimitBytes *uint64
+ CommittedASBytes *uint64
+ VmallocTotalBytes *uint64
+ VmallocUsedBytes *uint64
+ VmallocChunkBytes *uint64
+ PercpuBytes *uint64
+ HardwareCorruptedBytes *uint64
+ AnonHugePagesBytes *uint64
+ ShmemHugePagesBytes *uint64
+ ShmemPmdMappedBytes *uint64
+ CmaTotalBytes *uint64
+ CmaFreeBytes *uint64
+ HugepagesizeBytes *uint64
+ DirectMap4kBytes *uint64
+ DirectMap2MBytes *uint64
+ DirectMap1GBytes *uint64
}
// Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
+ return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
- // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
- }
+ var val, valBytes uint64
- v, err := strconv.ParseUint(fields[1], 0, 64)
+ val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
+ switch len(fields) {
+ case 2:
+ // No unit present, use the parsed value as bytes directly.
+ valBytes = val
+ case 3:
+ // Unit present in optional 3rd field, convert it to
+ // bytes. The only unit supported within the Linux
+ // kernel is `kB`.
+ if fields[2] != "kB" {
+ return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+ }
+
+ valBytes = 1024 * val
+
+ default:
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+ }
+
switch fields[0] {
case "MemTotal:":
- m.MemTotal = &v
+ m.MemTotal = &val
+ m.MemTotalBytes = &valBytes
case "MemFree:":
- m.MemFree = &v
+ m.MemFree = &val
+ m.MemFreeBytes = &valBytes
case "MemAvailable:":
- m.MemAvailable = &v
+ m.MemAvailable = &val
+ m.MemAvailableBytes = &valBytes
case "Buffers:":
- m.Buffers = &v
+ m.Buffers = &val
+ m.BuffersBytes = &valBytes
case "Cached:":
- m.Cached = &v
+ m.Cached = &val
+ m.CachedBytes = &valBytes
case "SwapCached:":
- m.SwapCached = &v
+ m.SwapCached = &val
+ m.SwapCachedBytes = &valBytes
case "Active:":
- m.Active = &v
+ m.Active = &val
+ m.ActiveBytes = &valBytes
case "Inactive:":
- m.Inactive = &v
+ m.Inactive = &val
+ m.InactiveBytes = &valBytes
case "Active(anon):":
- m.ActiveAnon = &v
+ m.ActiveAnon = &val
+ m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
- m.InactiveAnon = &v
+ m.InactiveAnon = &val
+ m.InactiveAnonBytes = &valBytes
case "Active(file):":
- m.ActiveFile = &v
+ m.ActiveFile = &val
+ m.ActiveFileBytes = &valBytes
case "Inactive(file):":
- m.InactiveFile = &v
+ m.InactiveFile = &val
+ m.InactiveFileBytes = &valBytes
case "Unevictable:":
- m.Unevictable = &v
+ m.Unevictable = &val
+ m.UnevictableBytes = &valBytes
case "Mlocked:":
- m.Mlocked = &v
+ m.Mlocked = &val
+ m.MlockedBytes = &valBytes
case "SwapTotal:":
- m.SwapTotal = &v
+ m.SwapTotal = &val
+ m.SwapTotalBytes = &valBytes
case "SwapFree:":
- m.SwapFree = &v
+ m.SwapFree = &val
+ m.SwapFreeBytes = &valBytes
case "Dirty:":
- m.Dirty = &v
+ m.Dirty = &val
+ m.DirtyBytes = &valBytes
case "Writeback:":
- m.Writeback = &v
+ m.Writeback = &val
+ m.WritebackBytes = &valBytes
case "AnonPages:":
- m.AnonPages = &v
+ m.AnonPages = &val
+ m.AnonPagesBytes = &valBytes
case "Mapped:":
- m.Mapped = &v
+ m.Mapped = &val
+ m.MappedBytes = &valBytes
case "Shmem:":
- m.Shmem = &v
+ m.Shmem = &val
+ m.ShmemBytes = &valBytes
case "Slab:":
- m.Slab = &v
+ m.Slab = &val
+ m.SlabBytes = &valBytes
case "SReclaimable:":
- m.SReclaimable = &v
+ m.SReclaimable = &val
+ m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
- m.SUnreclaim = &v
+ m.SUnreclaim = &val
+ m.SUnreclaimBytes = &valBytes
case "KernelStack:":
- m.KernelStack = &v
+ m.KernelStack = &val
+ m.KernelStackBytes = &valBytes
case "PageTables:":
- m.PageTables = &v
+ m.PageTables = &val
+ m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
- m.NFSUnstable = &v
+ m.NFSUnstable = &val
+ m.NFSUnstableBytes = &valBytes
case "Bounce:":
- m.Bounce = &v
+ m.Bounce = &val
+ m.BounceBytes = &valBytes
case "WritebackTmp:":
- m.WritebackTmp = &v
+ m.WritebackTmp = &val
+ m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
- m.CommitLimit = &v
+ m.CommitLimit = &val
+ m.CommitLimitBytes = &valBytes
case "Committed_AS:":
- m.CommittedAS = &v
+ m.CommittedAS = &val
+ m.CommittedASBytes = &valBytes
case "VmallocTotal:":
- m.VmallocTotal = &v
+ m.VmallocTotal = &val
+ m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
- m.VmallocUsed = &v
+ m.VmallocUsed = &val
+ m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
- m.VmallocChunk = &v
+ m.VmallocChunk = &val
+ m.VmallocChunkBytes = &valBytes
+ case "Percpu:":
+ m.Percpu = &val
+ m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
- m.HardwareCorrupted = &v
+ m.HardwareCorrupted = &val
+ m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
- m.AnonHugePages = &v
+ m.AnonHugePages = &val
+ m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
- m.ShmemHugePages = &v
+ m.ShmemHugePages = &val
+ m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &v
+ m.ShmemPmdMapped = &val
+ m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
- m.CmaTotal = &v
+ m.CmaTotal = &val
+ m.CmaTotalBytes = &valBytes
case "CmaFree:":
- m.CmaFree = &v
+ m.CmaFree = &val
+ m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
- m.HugePagesTotal = &v
+ m.HugePagesTotal = &val
case "HugePages_Free:":
- m.HugePagesFree = &v
+ m.HugePagesFree = &val
case "HugePages_Rsvd:":
- m.HugePagesRsvd = &v
+ m.HugePagesRsvd = &val
case "HugePages_Surp:":
- m.HugePagesSurp = &v
+ m.HugePagesSurp = &val
case "Hugepagesize:":
- m.Hugepagesize = &v
+ m.Hugepagesize = &val
+ m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
- m.DirectMap4k = &v
+ m.DirectMap4k = &val
+ m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
- m.DirectMap2M = &v
+ m.DirectMap2M = &val
+ m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
- m.DirectMap1G = &v
+ m.DirectMap1G = &val
+ m.DirectMap1GBytes = &valBytes
}
}
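
A brief sketch showing the intent of the new *Bytes counterparts next to the raw fields; only the accessor and fields visible in this diff are used:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	mi, err := pfs.Meminfo()
	if err != nil {
		fmt.Println(err)
		return
	}
	// The classic fields keep the raw /proc/meminfo numbers (usually kB),
	// while the new *Bytes counterparts are normalized via the unit column.
	if mi.MemTotal != nil && mi.MemTotalBytes != nil {
		fmt.Printf("MemTotal: %d (raw) = %d bytes\n", *mi.MemTotal, *mi.MemTotalBytes)
	}
	if mi.MemAvailableBytes != nil {
		fmt.Printf("MemAvailable: %d bytes\n", *mi.MemAvailableBytes)
	}
}
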
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 388ebf396..a704c5e73 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
- return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
+ return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index 9d8af6db7..75a3b6c81 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -88,7 +88,7 @@ type MountStatsNFS struct {
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
- Transport NFSTransportStats
+ Transport []NFSTransportStats
}
// mountStats implements MountStats.
@@ -194,8 +194,6 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64
- // The average time from the point the client sends RPC requests until it receives the response.
- AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64
}
@@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, err
}
- stats.Transport = *tstats
+ stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this
@@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7],
}
- if ns[0] != 0 {
- opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
- }
if len(ns) > 8 {
opStats.Errors = ns[8]
@@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
- return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+ return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
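
Because Transport is now a slice, callers that previously read a single NFSTransportStats need to iterate. A hedged sketch, assuming Proc.MountStats, the Mount.Stats value being a *MountStatsNFS, and the Sends/Receives field names from upstream procfs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		fmt.Println(err)
		return
	}
	mounts, err := p.MountStats()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok {
			continue
		}
		// An NFS mount can report several xprt lines, hence the slice.
		for _, t := range nfs.Transport {
			fmt.Printf("%s: %d sends, %d receives\n", m.Mount, t.Sends, t.Receives)
		}
	}
}
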
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index fdfa45611..316df5fbb 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+ return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 4da81ea57..b70f1fc7a 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -50,10 +50,13 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
+ // Drops shows the total number of dropped packets of all UDP sockets.
+ Drops *uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@@ -66,6 +69,7 @@ type (
RxQueue uint64
UID uint64
Inode uint64
+ Drops *uint64
}
)
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
defer f.Close()
var netIPSocket NetIPSocket
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
+ var udpPacketDrops uint64
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
+ if isUDP {
+ udpPacketDrops += *line.Drops
+ netIPSocketSummary.Drops = &udpPacketDrops
+ }
}
if err := s.Err(); err != nil {
return nil, err
@@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
+ return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
- return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
+ return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
@@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
+ return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
@@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
+ return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
@@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
+ return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
+ return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
@@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
+ return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
+ return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+ return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
- return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ }
+
+ // drops
+ if isUDP {
+ drops, err := strconv.ParseUint(fields[12], 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+ }
+ line.Drops = &drops
}
return line, nil
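
A usage sketch for the new Drops counter on UDP socket summaries; it assumes the NetUDPSummary accessor that procfs exposes on top of newNetIPSocketSummary:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	summary, err := pfs.NetUDPSummary()
	if err != nil {
		fmt.Println(err)
		return
	}
	// Drops is populated only for the UDP variants (the extra column in
	// /proc/net/udp); it stays nil for TCP summaries.
	if summary.Drops != nil {
		fmt.Printf("udp sockets: %d, dropped packets: %d\n", summary.UsedSockets, *summary.Drops)
	}
}
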
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index 360e36af7..fae62b13d 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
+ return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
+ return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index c77085291..71c8059f4 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
+ return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 000000000..13994c178
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+ // number of TX sessions currently installed where host handles cryptography
+ TLSCurrTxSw int
+ // number of RX sessions currently installed where host handles cryptography
+ TLSCurrRxSw int
+ // number of TX sessions currently installed where NIC handles cryptography
+ TLSCurrTxDevice int
+ // number of RX sessions currently installed where NIC handles cryptography
+ TLSCurrRxDevice int
+ // number of TX sessions opened with host cryptography
+ TLSTxSw int
+ // number of RX sessions opened with host cryptography
+ TLSRxSw int
+ // number of TX sessions opened with NIC cryptography
+ TLSTxDevice int
+ // number of RX sessions opened with NIC cryptography
+ TLSRxDevice int
+ // record decryption failed (e.g. due to incorrect authentication tag)
+ TLSDecryptError int
+ // number of RX resyncs sent to NICs handling cryptography
+ TLSRxDeviceResync int
+ // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+ TLSDecryptRetry int
+ // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+ TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+ file, err := os.Open(fs.proc.Path("net/tls_stat"))
+ if err != nil {
+ return TLSStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ tlsstat = TLSStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ switch name {
+ case "TlsCurrTxSw":
+ tlsstat.TLSCurrTxSw = value
+ case "TlsCurrRxSw":
+ tlsstat.TLSCurrRxSw = value
+ case "TlsCurrTxDevice":
+ tlsstat.TLSCurrTxDevice = value
+ case "TlsCurrRxDevice":
+ tlsstat.TLSCurrRxDevice = value
+ case "TlsTxSw":
+ tlsstat.TLSTxSw = value
+ case "TlsRxSw":
+ tlsstat.TLSRxSw = value
+ case "TlsTxDevice":
+ tlsstat.TLSTxDevice = value
+ case "TlsRxDevice":
+ tlsstat.TLSRxDevice = value
+ case "TlsDecryptError":
+ tlsstat.TLSDecryptError = value
+ case "TlsRxDeviceResync":
+ tlsstat.TLSRxDeviceResync = value
+ case "TlsDecryptRetry":
+ tlsstat.TLSDecryptRetry = value
+ case "TlsRxNoPadViolation":
+ tlsstat.TLSRxNoPadViolation = value
+ }
+
+ }
+
+ return tlsstat, s.Err()
+}
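
A minimal sketch of reading the new kTLS statistics through the constructor added above; /proc/net/tls_stat only exists when the kernel tls module is loaded, so the error path is expected on many systems:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	pfs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	stat, err := pfs.NewTLSStat()
	if err != nil {
		fmt.Println("kTLS stats unavailable:", err)
		return
	}
	fmt.Printf("current sw TX sessions: %d, decrypt errors: %d\n",
		stat.TLSCurrTxSw, stat.TLSDecryptError)
}
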
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index acbbc57ea..d868cebda 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
+ return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
+ return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
+ return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
+ return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
+ return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
index 7443edca9..7c597bc87 100644
--- a/vendor/github.com/prometheus/procfs/net_wireless.go
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
+ return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
}
return m, nil
@@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
- return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+ return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
- return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+ return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
- return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+ return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
- return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+ return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
- return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+ return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
- return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+ return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
- return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+ return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
- return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+ return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
- return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+ return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
}
w := &Wireless{
@@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
}
if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+ return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
}
return interfaces, nil
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index d1f71caa5..142796368 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
+ return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.
@@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
+ return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index c86d815d7..9530b14bc 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index c22666750..0f8f847f9 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index fe9dbb425..ccd35f153 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -61,7 +61,7 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
return parsePSIStats(bytes.NewReader(data))
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index ad8785a40..09060e820 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
+ v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 923e55005..06a8d931c 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -110,6 +110,11 @@ type ProcStat struct {
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
proc FS
}
@@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) {
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
)
if err != nil {
return ProcStat{}, err
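
A short sketch of reading the two new clock-tick fields; procfs.Self and Proc.Stat are assumed to behave as in upstream procfs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		fmt.Println(err)
		return
	}
	stat, err := p.Stat()
	if err != nil {
		fmt.Println(err)
		return
	}
	// GuestTime/CGuestTime are in clock ticks, like UTime and STime; they are
	// non-zero only for processes running guest vCPUs (e.g. qemu).
	fmt.Printf("pid %d: guest=%d cguest=%d ticks\n", stat.PID, stat.GuestTime, stat.CGuestTime)
}
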
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 46307f572..a055197c6 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,7 @@ package procfs
import (
"bytes"
+ "math/bits"
"sort"
"strconv"
"strings"
@@ -76,9 +77,9 @@ type ProcStatus struct {
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
+ UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
+ GIDs [4]uint64
// CpusAllowedList: List of cpu cores processes are allowed to run on.
CpusAllowedList []uint64
@@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) {
// convert kB to B
vBytes := vKBytes * 1024
- s.fillStatus(k, v, vKBytes, vBytes)
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
}
return s, nil
}
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak":
@@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.CpusAllowedList = calcCpusAllowedList(vString)
}
+ return nil
}
// TotalCtxtSwitches returns the total context switch.
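
Since UIDs and GIDs are now numeric, callers can drop their own string parsing. A minimal sketch using only identifiers visible in this diff plus procfs.Self:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		fmt.Println(err)
		return
	}
	status, err := p.NewStatus()
	if err != nil {
		fmt.Println(err)
		return
	}
	// UIDs/GIDs are now [4]uint64 (real, effective, saved set, filesystem),
	// so no string-to-int conversion is needed on the caller side.
	fmt.Printf("real uid=%d effective uid=%d\n", status.UIDs[0], status.UIDs[1])
}
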
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
index 12c5bf05b..5eefbe2ef 100644
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
index b8fad677d..28708e074 100644
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
}
}
}
}
if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
}
return softirqs, scanner.Err()
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 34fc3ee21..e36b41c18 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) {
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
index fa00f555d..65fec834b 100644
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
- return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
- return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
- return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
index df2215ece..80e0e947b 100644
--- a/vendor/github.com/prometheus/procfs/thread.go
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err)
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
}
t := Procs{}
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index ce5fefa5b..e54d94b09 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
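All of the procfs hunks above make the same change: the sentinel error (ErrFileParse or ErrFileRead) is now wrapped with %w instead of being formatted with %s, so callers can match it with errors.Is even though a second %w already wraps the underlying cause (multiple %w verbs require Go 1.20 or newer). A small hedged illustration with a stand-in sentinel, not the real procfs variables:

package main

import (
    "errors"
    "fmt"
)

// errFileParse stands in for the procfs package sentinel.
var errFileParse = errors.New("file parse error")

func main() {
    cause := errors.New(`strconv.ParseUint: parsing "x": invalid syntax`)

    // Old style: %s flattens the sentinel into text, so errors.Is cannot see it.
    flattened := fmt.Errorf("%s: couldn't parse %q (btime): %w", errFileParse, "x", cause)
    // New style: %w keeps the sentinel in the error chain.
    wrapped := fmt.Errorf("%w: couldn't parse %q (btime): %w", errFileParse, "x", cause)

    fmt.Println(errors.Is(flattened, errFileParse)) // false
    fmt.Println(errors.Is(wrapped, errFileParse))   // true
    fmt.Println(errors.Is(wrapped, cause))          // true: both wrapped errors are matchable
}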
diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml
deleted file mode 100644
index 6326d40f0..000000000
--- a/vendor/github.com/shopspring/decimal/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-
-arch:
- - amd64
- - ppc64le
-
-go:
- - 1.7.x
- - 1.14.x
- - 1.15.x
- - 1.16.x
- - 1.17.x
- - tip
-
-install:
- - go build .
-
-script:
- - go test -v
diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md
index aea61154b..432d0fd4e 100644
--- a/vendor/github.com/shopspring/decimal/CHANGELOG.md
+++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md
@@ -1,3 +1,30 @@
+## Decimal v1.4.0
+#### BREAKING
+- Drop support for Go version older than 1.10 [#361](https://github.com/shopspring/decimal/pull/361)
+
+#### FEATURES
+- Add implementation of natural logarithm [#339](https://github.com/shopspring/decimal/pull/339) [#357](https://github.com/shopspring/decimal/pull/357)
+- Add improved implementation of power operation [#358](https://github.com/shopspring/decimal/pull/358)
+- Add Compare method which forwards calls to Cmp [#346](https://github.com/shopspring/decimal/pull/346)
+- Add NewFromBigRat constructor [#288](https://github.com/shopspring/decimal/pull/288)
+- Add NewFromUint64 constructor [#352](https://github.com/shopspring/decimal/pull/352)
+
+#### ENHANCEMENTS
+- Migrate to Github Actions [#245](https://github.com/shopspring/decimal/pull/245) [#340](https://github.com/shopspring/decimal/pull/340)
+- Fix examples for RoundDown, RoundFloor, RoundUp, and RoundCeil [#285](https://github.com/shopspring/decimal/pull/285) [#328](https://github.com/shopspring/decimal/pull/328) [#341](https://github.com/shopspring/decimal/pull/341)
+- Use Godoc standard to mark deprecated Equals and StringScaled methods [#342](https://github.com/shopspring/decimal/pull/342)
+- Removed unnecessary min function for RescalePair method [#265](https://github.com/shopspring/decimal/pull/265)
+- Avoid reallocation of initial slice in MarshalBinary (GobEncode) [#355](https://github.com/shopspring/decimal/pull/355)
+- Optimize NumDigits method [#301](https://github.com/shopspring/decimal/pull/301) [#356](https://github.com/shopspring/decimal/pull/356)
+- Optimize BigInt method [#359](https://github.com/shopspring/decimal/pull/359)
+- Support scanning uint64 [#131](https://github.com/shopspring/decimal/pull/131) [#364](https://github.com/shopspring/decimal/pull/364)
+- Add docs section with alternative libraries [#363](https://github.com/shopspring/decimal/pull/363)
+
+#### BUGFIXES
+- Fix incorrect calculation of decimal modulo [#258](https://github.com/shopspring/decimal/pull/258) [#317](https://github.com/shopspring/decimal/pull/317)
+- Allocate new(big.Int) in Copy method to deeply clone it [#278](https://github.com/shopspring/decimal/pull/278)
+- Fix overflow edge case in QuoRem method [#322](https://github.com/shopspring/decimal/pull/322)
+
## Decimal v1.3.1
#### ENHANCEMENTS
diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md
index 2e35df068..318c9df58 100644
--- a/vendor/github.com/shopspring/decimal/README.md
+++ b/vendor/github.com/shopspring/decimal/README.md
@@ -1,6 +1,8 @@
# decimal
-[](https://app.travis-ci.com/shopspring/decimal) [](https://godoc.org/github.com/shopspring/decimal) [](https://goreportcard.com/report/github.com/shopspring/decimal)
+[](https://github.com/shopspring/decimal/actions/workflows/ci.yml)
+[](https://godoc.org/github.com/shopspring/decimal)
+[](https://goreportcard.com/report/github.com/shopspring/decimal)
Arbitrary-precision fixed-point decimal numbers in go.
@@ -20,7 +22,12 @@ Run `go get github.com/shopspring/decimal`
## Requirements
-Decimal library requires Go version `>=1.7`
+Decimal library requires Go version `>=1.10`
+
+## Documentation
+
+http://godoc.org/github.com/shopspring/decimal
+
## Usage
@@ -57,14 +64,16 @@ func main() {
}
```
-## Documentation
-
-http://godoc.org/github.com/shopspring/decimal
+## Alternative libraries
-## Production Usage
+When working with decimal numbers, you might face problems this library is not perfectly suited for.
+Fortunately, thanks to the wonderful community we have a dozen other libraries that you can choose from.
+Explore other alternatives to find the one that best fits your needs :)
-* [Spring](https://shopspring.com/), since August 14, 2014.
-* If you are using this in production, please let us know!
+* [cockroachdb/apd](https://github.com/cockroachdb/apd) - arbitrary precision, mutable and rich API similar to `big.Int`, more performant than this library
+* [alpacahq/alpacadecimal](https://github.com/alpacahq/alpacadecimal) - high performance, low precision (12 digits), fully compatible API with this library
+* [govalues/decimal](https://github.com/govalues/decimal) - high performance, zero-allocation, low precision (19 digits)
+* [greatcloak/decimal](https://github.com/greatcloak/decimal) - fork focusing on billing and e-commerce web application related use cases, includes out-of-the-box BSON marshaling support
## FAQ
diff --git a/vendor/github.com/shopspring/decimal/const.go b/vendor/github.com/shopspring/decimal/const.go
new file mode 100644
index 000000000..e5d6fa87e
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/const.go
@@ -0,0 +1,63 @@
+package decimal
+
+import (
+ "strings"
+)
+
+const (
+ strLn10 = "2.302585092994045684017991454684364207601101488628772976033327900967572609677352480235997205089598298341967784042286248633409525465082806756666287369098781689482907208325554680843799894826233198528393505308965377732628846163366222287698219886746543667474404243274365155048934314939391479619404400222105101714174800368808401264708068556774321622835522011480466371565912137345074785694768346361679210180644507064800027750268491674655058685693567342067058113642922455440575892572420824131469568901675894025677631135691929203337658714166023010570308963457207544037084746994016826928280848118428931484852494864487192780967627127577539702766860595249671667418348570442250719796500471495105049221477656763693866297697952211071826454973477266242570942932258279850258550978526538320760672631716430950599508780752371033310119785754733154142180842754386359177811705430982748238504564801909561029929182431823752535770975053956518769751037497088869218020518933950723853920514463419726528728696511086257149219884997874887377134568620916705849807828059751193854445009978131146915934666241071846692310107598438319191292230792503747298650929009880391941702654416816335727555703151596113564846546190897042819763365836983716328982174407366009162177850541779276367731145041782137660111010731042397832521894898817597921798666394319523936855916447118246753245630912528778330963604262982153040874560927760726641354787576616262926568298704957954913954918049209069438580790032763017941503117866862092408537949861264933479354871737451675809537088281067452440105892444976479686075120275724181874989395971643105518848195288330746699317814634930000321200327765654130472621883970596794457943468343218395304414844803701305753674262153675579814770458031413637793236291560128185336498466942261465206459942072917119370602444929358037007718981097362533224548366988505528285966192805098447175198503666680874970496982273220244823343097169111136813588418696549323714996941979687803008850408979618598756579894836445212043698216415292987811742973332588607915912510967187510929248475023930572665446276200923068791518135803477701295593646298412366497023355174586195564772461857717369368404676577047874319780573853271810933883496338813069945569399346101090745616033312247949360455361849123333063704751724871276379140924398331810164737823379692265637682071706935846394531616949411701841938119405416449466111274712819705817783293841742231409930022911502362192186723337268385688273533371925103412930705632544426611429765388301822384091026198582888433587455960453004548370789052578473166283701953392231047527564998119228742789713715713228319641003422124210082180679525276689858180956119208391760721080919923461516952599099473782780648128058792731993893453415320185969711021407542282796298237068941764740642225757212455392526179373652434440560595336591539160312524480149313234572453879524389036839236450507881731359711238145323701508413491122324390927681724749607955799151363982881058285740538000653371655553014196332241918087621018204919492651483892"
+)
+
+var (
+ ln10 = newConstApproximation(strLn10)
+)
+
+type constApproximation struct {
+ exact Decimal
+ approximations []Decimal
+}
+
+func newConstApproximation(value string) constApproximation {
+ parts := strings.Split(value, ".")
+ coeff, fractional := parts[0], parts[1]
+
+ coeffLen := len(coeff)
+ maxPrecision := len(fractional)
+
+ var approximations []Decimal
+ for p := 1; p < maxPrecision; p *= 2 {
+ r := RequireFromString(value[:coeffLen+p])
+ approximations = append(approximations, r)
+ }
+
+ return constApproximation{
+ RequireFromString(value),
+ approximations,
+ }
+}
+
+// Returns the smallest approximation available that's at least as precise
+// as the passed precision (places after decimal point), i.e. Floor[ log2(precision) ] + 1
+func (c constApproximation) withPrecision(precision int32) Decimal {
+ i := 0
+
+ if precision >= 1 {
+ i++
+ }
+
+ for precision >= 16 {
+ precision /= 16
+ i += 4
+ }
+
+ for precision >= 2 {
+ precision /= 2
+ i++
+ }
+
+ if i >= len(c.approximations) {
+ return c.exact
+ }
+
+ return c.approximations[i]
+}
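The approximation table built above stores prefixes of ln(10) whose precision doubles at each index (1, 2, 4, 8, ... digits after the decimal point), and withPrecision picks the first entry that is at least as precise as requested, falling back to the exact constant. The sketch below replicates only the index calculation in a hypothetical helper named approximationIndex; it is not part of the library.

package main

import "fmt"

// approximationIndex mirrors the integer-log2 walk in withPrecision: it returns
// the table index whose precision (2^index digits) covers the requested number
// of digits after the decimal point.
func approximationIndex(precision int32) int {
    i := 0
    if precision >= 1 {
        i++
    }
    for precision >= 16 {
        precision /= 16
        i += 4
    }
    for precision >= 2 {
        precision /= 2
        i++
    }
    return i
}

func main() {
    for _, p := range []int32{1, 2, 3, 8, 100} {
        fmt.Printf("precision %3d -> index %d\n", p, approximationIndex(p))
    }
    // Prints indices 1, 2, 2, 4, 7, i.e. Floor[log2(precision)] + 1; the library
    // falls back to the exact constant when the index exceeds its table.
}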
diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go
index 84405ec1c..a37a2301e 100644
--- a/vendor/github.com/shopspring/decimal/decimal.go
+++ b/vendor/github.com/shopspring/decimal/decimal.go
@@ -4,14 +4,14 @@
//
// The best way to create a new Decimal is to use decimal.NewFromString, ex:
//
-// n, err := decimal.NewFromString("-123.4567")
-// n.String() // output: "-123.4567"
+// n, err := decimal.NewFromString("-123.4567")
+// n.String() // output: "-123.4567"
//
// To use Decimal as part of a struct:
//
-// type Struct struct {
-// Number Decimal
-// }
+// type StructName struct {
+// Number Decimal
+// }
//
// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point.
package decimal
@@ -32,18 +32,31 @@ import (
//
// Example:
//
-// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
-// d1.String() // output: "0.6666666666666667"
-// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000))
-// d2.String() // output: "0.0000666666666667"
-// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3))
-// d3.String() // output: "6666.6666666666666667"
-// decimal.DivisionPrecision = 3
-// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
-// d4.String() // output: "0.667"
-//
+// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
+// d1.String() // output: "0.6666666666666667"
+// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000))
+// d2.String() // output: "0.0000666666666667"
+// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3))
+// d3.String() // output: "6666.6666666666666667"
+// decimal.DivisionPrecision = 3
+// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
+// d4.String() // output: "0.667"
var DivisionPrecision = 16
+// PowPrecisionNegativeExponent specifies the maximum precision of the result (digits after the decimal point)
+// when calculating a decimal power. It is only used when the exponent is negative.
+// This constant applies to the Pow, PowInt32 and PowBigInt methods; the PowWithPrecision method is not constrained by it.
+//
+// Example:
+//
+// d1, err := decimal.NewFromFloat(15.2).PowInt32(-2)
+// d1.String() // output: "0.0043282548476454"
+//
+// decimal.PowPrecisionNegativeExponent = 24
+// d2, err := decimal.NewFromFloat(15.2).PowInt32(-2)
+// d2.String() // output: "0.004328254847645429362881"
+var PowPrecisionNegativeExponent = 16
+
// MarshalJSONWithoutQuotes should be set to true if you want the decimal to
// be JSON marshaled as a number, instead of as a string.
// WARNING: this is dangerous for decimals with many digits, since many JSON
@@ -91,12 +104,12 @@ func New(value int64, exp int32) Decimal {
}
}
-// NewFromInt converts a int64 to Decimal.
+// NewFromInt converts an int64 to Decimal.
//
// Example:
//
-// NewFromInt(123).String() // output: "123"
-// NewFromInt(-10).String() // output: "-10"
+// NewFromInt(123).String() // output: "123"
+// NewFromInt(-10).String() // output: "-10"
func NewFromInt(value int64) Decimal {
return Decimal{
value: big.NewInt(value),
@@ -104,12 +117,12 @@ func NewFromInt(value int64) Decimal {
}
}
-// NewFromInt32 converts a int32 to Decimal.
+// NewFromInt32 converts an int32 to Decimal.
//
// Example:
//
-// NewFromInt(123).String() // output: "123"
-// NewFromInt(-10).String() // output: "-10"
+// NewFromInt(123).String() // output: "123"
+// NewFromInt(-10).String() // output: "-10"
func NewFromInt32(value int32) Decimal {
return Decimal{
value: big.NewInt(int64(value)),
@@ -117,6 +130,18 @@ func NewFromInt32(value int32) Decimal {
}
}
+// NewFromUint64 converts an uint64 to Decimal.
+//
+// Example:
+//
+// NewFromUint64(123).String() // output: "123"
+func NewFromUint64(value uint64) Decimal {
+ return Decimal{
+ value: new(big.Int).SetUint64(value),
+ exp: 0,
+ }
+}
+
// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp
func NewFromBigInt(value *big.Int, exp int32) Decimal {
return Decimal{
@@ -125,15 +150,33 @@ func NewFromBigInt(value *big.Int, exp int32) Decimal {
}
}
+// NewFromBigRat returns a new Decimal from a big.Rat. The numerator and
+// denominator are divided and rounded to the given precision.
+//
+// Example:
+//
+// d1 := NewFromBigRat(big.NewRat(0, 1), 0) // output: "0"
+// d2 := NewFromBigRat(big.NewRat(4, 5), 1) // output: "0.8"
+// d3 := NewFromBigRat(big.NewRat(1000, 3), 3) // output: "333.333"
+// d4 := NewFromBigRat(big.NewRat(2, 7), 4) // output: "0.2857"
+func NewFromBigRat(value *big.Rat, precision int32) Decimal {
+ return Decimal{
+ value: new(big.Int).Set(value.Num()),
+ exp: 0,
+ }.DivRound(Decimal{
+ value: new(big.Int).Set(value.Denom()),
+ exp: 0,
+ }, precision)
+}
+
// NewFromString returns a new Decimal from a string representation.
// Trailing zeroes are not trimmed.
//
// Example:
//
-// d, err := NewFromString("-123.45")
-// d2, err := NewFromString(".0001")
-// d3, err := NewFromString("1.47000")
-//
+// d, err := NewFromString("-123.45")
+// d2, err := NewFromString(".0001")
+// d3, err := NewFromString("1.47000")
func NewFromString(value string) (Decimal, error) {
originalInput := value
var intString string
@@ -211,15 +254,14 @@ func NewFromString(value string) (Decimal, error) {
//
// Example:
//
-// r := regexp.MustCompile("[$,]")
-// d1, err := NewFromFormattedString("$5,125.99", r)
+// r := regexp.MustCompile("[$,]")
+// d1, err := NewFromFormattedString("$5,125.99", r)
//
-// r2 := regexp.MustCompile("[_]")
-// d2, err := NewFromFormattedString("1_000_000", r2)
-//
-// r3 := regexp.MustCompile("[USD\\s]")
-// d3, err := NewFromFormattedString("5000 USD", r3)
+// r2 := regexp.MustCompile("[_]")
+// d2, err := NewFromFormattedString("1_000_000", r2)
//
+// r3 := regexp.MustCompile("[USD\\s]")
+// d3, err := NewFromFormattedString("5000 USD", r3)
func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) {
parsedValue := replRegexp.ReplaceAllString(value, "")
d, err := NewFromString(parsedValue)
@@ -230,13 +272,12 @@ func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, e
}
// RequireFromString returns a new Decimal from a string representation
-// or panics if NewFromString would have returned an error.
+// or panics if NewFromString had returned an error.
//
// Example:
//
-// d := RequireFromString("-123.45")
-// d2 := RequireFromString(".0001")
-//
+// d := RequireFromString("-123.45")
+// d2 := RequireFromString(".0001")
func RequireFromString(value string) Decimal {
dec, err := NewFromString(value)
if err != nil {
@@ -332,8 +373,7 @@ func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal {
//
// Example:
//
-// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46"
-//
+// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46"
func NewFromFloatWithExponent(value float64, exp int32) Decimal {
if math.IsNaN(value) || math.IsInf(value, 0) {
panic(fmt.Sprintf("Cannot create a Decimal from %v", value))
@@ -418,7 +458,7 @@ func NewFromFloatWithExponent(value float64, exp int32) Decimal {
func (d Decimal) Copy() Decimal {
d.ensureInitialized()
return Decimal{
- value: &(*d.value),
+ value: new(big.Int).Set(d.value),
exp: d.exp,
}
}
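The one-line Copy fix above matters: &(*d.value) only re-takes the address of the same big.Int, so the "copy" still aliased the original coefficient, while new(big.Int).Set allocates a fresh value. A hedged sketch of the aliasing difference using plain big.Int values:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    original := big.NewInt(42)

    shallow := &(*original)            // same *big.Int, just re-taken address
    deep := new(big.Int).Set(original) // independent copy of the value

    original.SetInt64(7) // mutate the original coefficient

    fmt.Println(shallow) // 7  (follows the mutation: aliased)
    fmt.Println(deep)    // 42 (unaffected: deep copy)
}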
@@ -430,7 +470,7 @@ func (d Decimal) Copy() Decimal {
//
// Example:
//
-// d := New(12345, -4)
+// d := New(12345, -4)
// d2 := d.rescale(-1)
// d3 := d2.rescale(-4)
// println(d1)
@@ -442,7 +482,6 @@ func (d Decimal) Copy() Decimal {
// 1.2345
// 1.2
// 1.2000
-//
func (d Decimal) rescale(exp int32) Decimal {
d.ensureInitialized()
@@ -552,11 +591,13 @@ func (d Decimal) Div(d2 Decimal) Decimal {
return d.DivRound(d2, int32(DivisionPrecision))
}
-// QuoRem does divsion with remainder
+// QuoRem does division with remainder
// d.QuoRem(d2,precision) returns quotient q and remainder r such that
-// d = d2 * q + r, q an integer multiple of 10^(-precision)
-// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0
-// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0
+//
+// d = d2 * q + r, q an integer multiple of 10^(-precision)
+// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0
+// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0
+//
// Note that precision<0 is allowed as input.
func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) {
d.ensureInitialized()
@@ -565,7 +606,7 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) {
panic("decimal division by 0")
}
scale := -precision
- e := int64(d.exp - d2.exp - scale)
+ e := int64(d.exp) - int64(d2.exp) - int64(scale)
if e > math.MaxInt32 || e < math.MinInt32 {
panic("overflow in decimal QuoRem")
}
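The QuoRem change widens each operand to int64 before subtracting, so the overflow check that follows sees the true value; with the old expression the subtraction happened in int32 and could wrap around before the conversion. A hedged arithmetic illustration, with values chosen only to show the wrap-around:

package main

import (
    "fmt"
    "math"
)

func main() {
    var exp1, exp2, scale int32 = math.MaxInt32, -1, 0

    // Old expression: the subtraction happens in int32 and wraps before the cast.
    wrapped := int64(exp1 - exp2 - scale)
    // New expression: every operand is widened first, so the result is exact.
    exact := int64(exp1) - int64(exp2) - int64(scale)

    fmt.Println(wrapped) // -2147483648 (overflowed)
    fmt.Println(exact)   // 2147483648  (out of int32 range, now caught by the check)
}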
@@ -599,8 +640,10 @@ func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) {
// DivRound divides and rounds to a given precision
// i.e. to an integer multiple of 10^(-precision)
-// for a positive quotient digit 5 is rounded up, away from 0
-// if the quotient is negative then digit 5 is rounded down, away from 0
+//
+// for a positive quotient digit 5 is rounded up, away from 0
+// if the quotient is negative then digit 5 is rounded down, away from 0
+//
// Note that precision<0 is allowed as input.
func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal {
// QuoRem already checks initialization
@@ -628,24 +671,278 @@ func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal {
// Mod returns d % d2.
func (d Decimal) Mod(d2 Decimal) Decimal {
- quo := d.Div(d2).Truncate(0)
- return d.Sub(d2.Mul(quo))
+ _, r := d.QuoRem(d2, 0)
+ return r
}
-// Pow returns d to the power d2
+// Pow returns d to the power of d2.
+// When the exponent is negative, the returned decimal will have a maximum precision of PowPrecisionNegativeExponent places after the decimal point.
+//
+// Pow returns 0 (the zero value of Decimal) instead of an error for power-operation edge cases; to handle those edge cases, use PowWithPrecision.
+// Edge cases not handled by Pow:
+// - 0 ** 0 => undefined value
+// - 0 ** y, where y < 0 => infinity
+// - x ** y, where x < 0 and y is non-integer decimal => imaginary value
+//
+// Example:
+//
+// d1 := decimal.NewFromFloat(4.0)
+// d2 := decimal.NewFromFloat(4.0)
+// res1 := d1.Pow(d2)
+// res1.String() // output: "256"
+//
+// d3 := decimal.NewFromFloat(5.0)
+// d4 := decimal.NewFromFloat(5.73)
+// res2 := d3.Pow(d4)
+// res2.String() // output: "10118.08037125"
func (d Decimal) Pow(d2 Decimal) Decimal {
- var temp Decimal
- if d2.IntPart() == 0 {
- return NewFromFloat(1)
+ baseSign := d.Sign()
+ expSign := d2.Sign()
+
+ if baseSign == 0 {
+ if expSign == 0 {
+ return Decimal{}
+ }
+ if expSign == 1 {
+ return Decimal{zeroInt, 0}
+ }
+ if expSign == -1 {
+ return Decimal{}
+ }
+ }
+
+ if expSign == 0 {
+ return Decimal{oneInt, 0}
+ }
+
+ // TODO: optimize extraction of fractional part
+ one := Decimal{oneInt, 0}
+ expIntPart, expFracPart := d2.QuoRem(one, 0)
+
+ if baseSign == -1 && !expFracPart.IsZero() {
+ return Decimal{}
+ }
+
+ intPartPow, _ := d.PowBigInt(expIntPart.value)
+
+ // if exponent is an integer we don't need to calculate d1**frac(d2)
+ if expFracPart.value.Sign() == 0 {
+ return intPartPow
+ }
+
+ // TODO: optimize NumDigits for more performant precision adjustment
+ digitsBase := d.NumDigits()
+ digitsExponent := d2.NumDigits()
+
+ precision := digitsBase
+
+ if digitsExponent > precision {
+ precision += digitsExponent
+ }
+
+ precision += 6
+
+ // Calculate x ** frac(y), where
+ // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y))
+ fracPartPow, err := d.Abs().Ln(-d.exp + int32(precision))
+ if err != nil {
+ return Decimal{}
+ }
+
+ fracPartPow = fracPartPow.Mul(expFracPart)
+
+ fracPartPow, err = fracPartPow.ExpTaylor(-d.exp + int32(precision))
+ if err != nil {
+ return Decimal{}
+ }
+
+ // Join integer and fractional part,
+ // base ** (expBase + expFrac) = base ** expBase * base ** expFrac
+ res := intPartPow.Mul(fracPartPow)
+
+ return res
+}
+
+// PowWithPrecision returns d to the power of d2.
+// The precision parameter specifies the minimum precision of the result (digits after the decimal point).
+// The returned decimal is not rounded to 'precision' places after the decimal point.
+//
+// PowWithPrecision returns error when:
+// - 0 ** 0 => undefined value
+// - 0 ** y, where y < 0 => infinity
+// - x ** y, where x < 0 and y is non-integer decimal => imaginary value
+//
+// Example:
+//
+// d1 := decimal.NewFromFloat(4.0)
+// d2 := decimal.NewFromFloat(4.0)
+// res1, err := d1.PowWithPrecision(d2, 2)
+// res1.String() // output: "256"
+//
+// d3 := decimal.NewFromFloat(5.0)
+// d4 := decimal.NewFromFloat(5.73)
+// res2, err := d3.PowWithPrecision(d4, 5)
+// res2.String() // output: "10118.080371595015625"
+//
+// d5 := decimal.NewFromFloat(-3.0)
+// d6 := decimal.NewFromFloat(-6.0)
+// res3, err := d5.PowWithPrecision(d6, 10)
+// res3.String() // output: "0.0013717421"
+func (d Decimal) PowWithPrecision(d2 Decimal, precision int32) (Decimal, error) {
+ baseSign := d.Sign()
+ expSign := d2.Sign()
+
+ if baseSign == 0 {
+ if expSign == 0 {
+ return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0")
+ }
+ if expSign == 1 {
+ return Decimal{zeroInt, 0}, nil
+ }
+ if expSign == -1 {
+ return Decimal{}, fmt.Errorf("cannot represent infinity value of 0 ** y, where y < 0")
+ }
+ }
+
+ if expSign == 0 {
+ return Decimal{oneInt, 0}, nil
+ }
+
+ // TODO: optimize extraction of fractional part
+ one := Decimal{oneInt, 0}
+ expIntPart, expFracPart := d2.QuoRem(one, 0)
+
+ if baseSign == -1 && !expFracPart.IsZero() {
+ return Decimal{}, fmt.Errorf("cannot represent imaginary value of x ** y, where x < 0 and y is non-integer decimal")
+ }
+
+ intPartPow, _ := d.powBigIntWithPrecision(expIntPart.value, precision)
+
+ // if exponent is an integer we don't need to calculate d1**frac(d2)
+ if expFracPart.value.Sign() == 0 {
+ return intPartPow, nil
+ }
+
+ // TODO: optimize NumDigits for more performant precision adjustment
+ digitsBase := d.NumDigits()
+ digitsExponent := d2.NumDigits()
+
+ if int32(digitsBase) > precision {
+ precision = int32(digitsBase)
+ }
+ if int32(digitsExponent) > precision {
+ precision += int32(digitsExponent)
+ }
+ // increase precision by 10 to compensate for errors in further calculations
+ precision += 10
+
+ // Calculate x ** frac(y), where
+ // x ** frac(y) = exp(ln(x ** frac(y)) = exp(ln(x) * frac(y))
+ fracPartPow, err := d.Abs().Ln(precision)
+ if err != nil {
+ return Decimal{}, err
+ }
+
+ fracPartPow = fracPartPow.Mul(expFracPart)
+
+ fracPartPow, err = fracPartPow.ExpTaylor(precision)
+ if err != nil {
+ return Decimal{}, err
+ }
+
+ // Join integer and fractional part,
+ // base ** (expBase + expFrac) = base ** expBase * base ** expFrac
+ res := intPartPow.Mul(fracPartPow)
+
+ return res, nil
+}
+
+// PowInt32 returns d to the power of exp, where exp is int32.
+// It only returns an error when both d and exp are 0, because the result is then undefined.
+//
+// When the exponent is negative, the returned decimal will have a maximum precision of PowPrecisionNegativeExponent places after the decimal point.
+//
+// Example:
+//
+// d1, err := decimal.NewFromFloat(4.0).PowInt32(4)
+// d1.String() // output: "256"
+//
+// d2, err := decimal.NewFromFloat(3.13).PowInt32(5)
+// d2.String() // output: "300.4150512793"
+func (d Decimal) PowInt32(exp int32) (Decimal, error) {
+ if d.IsZero() && exp == 0 {
+ return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0")
+ }
+
+ isExpNeg := exp < 0
+ exp = abs(exp)
+
+ n, result := d, New(1, 0)
+
+ for exp > 0 {
+ if exp%2 == 1 {
+ result = result.Mul(n)
+ }
+ exp /= 2
+
+ if exp > 0 {
+ n = n.Mul(n)
+ }
+ }
+
+ if isExpNeg {
+ return New(1, 0).DivRound(result, int32(PowPrecisionNegativeExponent)), nil
+ }
+
+ return result, nil
+}
+
+// PowBigInt returns d to the power of exp, where exp is big.Int.
+// It only returns an error when both d and exp are 0, because the result is then undefined.
+//
+// When the exponent is negative, the returned decimal will have a maximum precision of PowPrecisionNegativeExponent places after the decimal point.
+//
+// Example:
+//
+// d1, err := decimal.NewFromFloat(3.0).PowBigInt(big.NewInt(3))
+// d1.String() // output: "27"
+//
+// d2, err := decimal.NewFromFloat(629.25).PowBigInt(big.NewInt(5))
+// d2.String() // output: "98654323103449.5673828125"
+func (d Decimal) PowBigInt(exp *big.Int) (Decimal, error) {
+ return d.powBigIntWithPrecision(exp, int32(PowPrecisionNegativeExponent))
+}
+
+func (d Decimal) powBigIntWithPrecision(exp *big.Int, precision int32) (Decimal, error) {
+ if d.IsZero() && exp.Sign() == 0 {
+ return Decimal{}, fmt.Errorf("cannot represent undefined value of 0**0")
}
- temp = d.Pow(d2.Div(NewFromFloat(2)))
- if d2.IntPart()%2 == 0 {
- return temp.Mul(temp)
+
+ tmpExp := new(big.Int).Set(exp)
+ isExpNeg := exp.Sign() < 0
+
+ if isExpNeg {
+ tmpExp.Abs(tmpExp)
+ }
+
+ n, result := d, New(1, 0)
+
+ for tmpExp.Sign() > 0 {
+ if tmpExp.Bit(0) == 1 {
+ result = result.Mul(n)
+ }
+ tmpExp.Rsh(tmpExp, 1)
+
+ if tmpExp.Sign() > 0 {
+ n = n.Mul(n)
+ }
}
- if d2.IntPart() > 0 {
- return temp.Mul(temp).Mul(d)
+
+ if isExpNeg {
+ return New(1, 0).DivRound(result, precision), nil
}
- return temp.Mul(temp).Div(d)
+
+ return result, nil
}
// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm.
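A short hedged usage sketch of the new power API, assuming the vendored decimal v1.4.0 code shown in this diff and reusing only the values from its doc comments:

package main

import (
    "fmt"

    "github.com/shopspring/decimal"
)

func main() {
    // Plain Pow: integer and fractional exponents (values taken from the doc comments above).
    fmt.Println(decimal.NewFromFloat(4.0).Pow(decimal.NewFromFloat(4.0)))  // 256
    fmt.Println(decimal.NewFromFloat(5.0).Pow(decimal.NewFromFloat(5.73))) // 10118.08037125

    // 0**0 is reported as an error by PowInt32 (and PowWithPrecision) instead of
    // being silently returned as zero, which is what Pow does.
    if _, err := decimal.Zero.PowInt32(0); err != nil {
        fmt.Println("edge case:", err)
    }

    // Negative exponents honour PowPrecisionNegativeExponent unless an explicit
    // precision is requested via PowWithPrecision.
    d, err := decimal.NewFromFloat(15.2).PowInt32(-2)
    fmt.Println(d, err) // 0.0043282548476454 <nil>
}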
@@ -655,9 +952,8 @@ func (d Decimal) Pow(d2 Decimal) Decimal {
//
// Example:
//
-// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000"
-// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284"
-//
+// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000"
+// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284"
func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) {
// Algorithm based on Variable precision exponential function.
// ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham.
@@ -747,15 +1043,14 @@ func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) {
//
// Example:
//
-// d, err := NewFromFloat(26.1).ExpTaylor(2).String()
-// d.String() // output: "216314672147.06"
-//
-// NewFromFloat(26.1).ExpTaylor(20).String()
-// d.String() // output: "216314672147.05767284062928674083"
+// d, err := NewFromFloat(26.1).ExpTaylor(2).String()
+// d.String() // output: "216314672147.06"
//
-// NewFromFloat(26.1).ExpTaylor(-10).String()
-// d.String() // output: "220000000000"
+// NewFromFloat(26.1).ExpTaylor(20).String()
+// d.String() // output: "216314672147.05767284062928674083"
//
+// NewFromFloat(26.1).ExpTaylor(-10).String()
+// d.String() // output: "220000000000"
func (d Decimal) ExpTaylor(precision int32) (Decimal, error) {
// Note(mwoss): Implementation can be optimized by exclusively using big.Int API only
if d.IsZero() {
@@ -812,14 +1107,162 @@ func (d Decimal) ExpTaylor(precision int32) (Decimal, error) {
return result, nil
}
+// Ln calculates natural logarithm of d.
+// Precision argument specifies how precise the result must be (number of digits after decimal point).
+// Negative precision is allowed.
+//
+// Example:
+//
+// d1, err := NewFromFloat(13.3).Ln(2)
+// d1.String() // output: "2.59"
+//
+// d2, err := NewFromFloat(579.161).Ln(10)
+// d2.String() // output: "6.3615805046"
+func (d Decimal) Ln(precision int32) (Decimal, error) {
+ // Algorithm based on The Use of Iteration Methods for Approximating the Natural Logarithm,
+ // James F. Epperson, The American Mathematical Monthly, Vol. 96, No. 9, November 1989, pp. 831-835.
+ if d.IsNegative() {
+ return Decimal{}, fmt.Errorf("cannot calculate natural logarithm for negative decimals")
+ }
+
+ if d.IsZero() {
+ return Decimal{}, fmt.Errorf("cannot represent natural logarithm of 0, result: -infinity")
+ }
+
+ calcPrecision := precision + 2
+ z := d.Copy()
+
+ var comp1, comp3, comp2, comp4, reduceAdjust Decimal
+ comp1 = z.Sub(Decimal{oneInt, 0})
+ comp3 = Decimal{oneInt, -1}
+
+ // for decimal in range [0.9, 1.1] where ln(d) is close to 0
+ usePowerSeries := false
+
+ if comp1.Abs().Cmp(comp3) <= 0 {
+ usePowerSeries = true
+ } else {
+ // reduce input decimal to range [0.1, 1)
+ expDelta := int32(z.NumDigits()) + z.exp
+ z.exp -= expDelta
+
+ // Input decimal was reduced by factor of 10^expDelta, thus we will need to add
+ // ln(10^expDelta) = expDelta * ln(10)
+ // to the result to compensate that
+ ln10 := ln10.withPrecision(calcPrecision)
+ reduceAdjust = NewFromInt32(expDelta)
+ reduceAdjust = reduceAdjust.Mul(ln10)
+
+ comp1 = z.Sub(Decimal{oneInt, 0})
+
+ if comp1.Abs().Cmp(comp3) <= 0 {
+ usePowerSeries = true
+ } else {
+ // initial estimate using floats
+ zFloat := z.InexactFloat64()
+ comp1 = NewFromFloat(math.Log(zFloat))
+ }
+ }
+
+ epsilon := Decimal{oneInt, -calcPrecision}
+
+ if usePowerSeries {
+ // Power Series - https://en.wikipedia.org/wiki/Logarithm#Power_series
+ // Calculating n-th term of formula: ln(z+1) = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ]
+ // until the difference between current and next term is smaller than epsilon.
+ // Converges quite fast for decimals close to 1.0
+
+ // z + 2
+ comp2 = comp1.Add(Decimal{twoInt, 0})
+ // z / (z + 2)
+ comp3 = comp1.DivRound(comp2, calcPrecision)
+ // 2 * (z / (z + 2))
+ comp1 = comp3.Add(comp3)
+ comp2 = comp1.Copy()
+
+ for n := 1; ; n++ {
+ // 2 * (z / (z+2))^(2n+1)
+ comp2 = comp2.Mul(comp3).Mul(comp3)
+
+ // 1 / (2n+1) * 2 * (z / (z+2))^(2n+1)
+ comp4 = NewFromInt(int64(2*n + 1))
+ comp4 = comp2.DivRound(comp4, calcPrecision)
+
+ // comp1 = 2 sum [ 1 / (2n+1) * (z / (z+2))^(2n+1) ]
+ comp1 = comp1.Add(comp4)
+
+ if comp4.Abs().Cmp(epsilon) <= 0 {
+ break
+ }
+ }
+ } else {
+ // Halley's Iteration.
+ // Calculating n-th term of formula: a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z),
+ // until the difference between current and next term is smaller than epsilon
+ var prevStep Decimal
+ maxIters := calcPrecision*2 + 10
+
+ for i := int32(0); i < maxIters; i++ {
+ // exp(a_n)
+ comp3, _ = comp1.ExpTaylor(calcPrecision)
+ // exp(a_n) - z
+ comp2 = comp3.Sub(z)
+ // 2 * (exp(a_n) - z)
+ comp2 = comp2.Add(comp2)
+ // exp(a_n) + z
+ comp4 = comp3.Add(z)
+ // 2 * (exp(a_n) - z) / (exp(a_n) + z)
+ comp3 = comp2.DivRound(comp4, calcPrecision)
+ // comp1 = a_(n+1) = a_n - 2 * (exp(a_n) - z) / (exp(a_n) + z)
+ comp1 = comp1.Sub(comp3)
+
+ if prevStep.Add(comp3).IsZero() {
+ // If iteration steps oscillate, we should return early to prevent an infinite loop
+ // NOTE(mwoss): This should be quite a rare case, so returning an error is not necessary
+ break
+ }
+
+ if comp3.Abs().Cmp(epsilon) <= 0 {
+ break
+ }
+
+ prevStep = comp3
+ }
+ }
+
+ comp1 = comp1.Add(reduceAdjust)
+
+ return comp1.Round(precision), nil
+}
+
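A hedged usage sketch for the new Ln method, reusing the values from its doc comment; the precision argument controls the rounding of the final result, and non-positive inputs are rejected with an error rather than producing NaN or -Inf:

package main

import (
    "fmt"

    "github.com/shopspring/decimal"
)

func main() {
    d1, err := decimal.NewFromFloat(13.3).Ln(2)
    fmt.Println(d1, err) // 2.59 <nil>

    d2, err := decimal.NewFromFloat(579.161).Ln(10)
    fmt.Println(d2, err) // 6.3615805046 <nil>

    _, err = decimal.Zero.Ln(5)
    fmt.Println(err) // cannot represent natural logarithm of 0, result: -infinity
}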
// NumDigits returns the number of digits of the decimal coefficient (d.Value)
-// Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part
func (d Decimal) NumDigits() int {
- // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string
- if d.IsNegative() {
- return len(d.value.String()) - 1
+ if d.value == nil {
+ return 1
+ }
+
+ if d.value.IsInt64() {
+ i64 := d.value.Int64()
+ // restrict fast path to integers with exact conversion to float64
+ if i64 <= (1<<53) && i64 >= -(1<<53) {
+ if i64 == 0 {
+ return 1
+ }
+ return int(math.Log10(math.Abs(float64(i64)))) + 1
+ }
+ }
+
+ estimatedNumDigits := int(float64(d.value.BitLen()) / math.Log2(10))
+
+ // estimatedNumDigits (lg10) may be off by 1, need to verify
+ digitsBigInt := big.NewInt(int64(estimatedNumDigits))
+ errorCorrectionUnit := digitsBigInt.Exp(tenInt, digitsBigInt, nil)
+
+ if d.value.CmpAbs(errorCorrectionUnit) >= 0 {
+ return estimatedNumDigits + 1
}
- return len(d.value.String())
+
+ return estimatedNumDigits
}
// IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false.
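The rewritten NumDigits above avoids the big.Int-to-string round trip: small coefficients use log10 directly, and larger ones estimate the digit count from BitLen()/log2(10), then correct the possible off-by-one with a single comparison against 10^estimate. The sketch below mirrors only that estimate in a hypothetical helper named numDigits; it is not the library function.

package main

import (
    "fmt"
    "math"
    "math/big"
)

// numDigits mirrors the BitLen-based estimate used above for coefficients
// that do not fit the float64 fast path.
func numDigits(v *big.Int) int {
    estimate := int(float64(v.BitLen()) / math.Log2(10))
    bound := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(estimate)), nil)
    if v.CmpAbs(bound) >= 0 {
        return estimate + 1
    }
    return estimate
}

func main() {
    v, _ := new(big.Int).SetString("99999999999999999999999999999999", 10) // 32 nines
    fmt.Println(numDigits(v))                       // 32
    fmt.Println(numDigits(v.Add(v, big.NewInt(1)))) // 33 (the +1 correction kicks in at exact powers of ten)
}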
@@ -851,10 +1294,9 @@ func abs(n int32) int32 {
// Cmp compares the numbers represented by d and d2 and returns:
//
-// -1 if d < d2
-// 0 if d == d2
-// +1 if d > d2
-//
+// -1 if d < d2
+// 0 if d == d2
+// +1 if d > d2
func (d Decimal) Cmp(d2 Decimal) int {
d.ensureInitialized()
d2.ensureInitialized()
@@ -868,12 +1310,21 @@ func (d Decimal) Cmp(d2 Decimal) int {
return rd.value.Cmp(rd2.value)
}
+// Compare compares the numbers represented by d and d2 and returns:
+//
+// -1 if d < d2
+// 0 if d == d2
+// +1 if d > d2
+func (d Decimal) Compare(d2 Decimal) int {
+ return d.Cmp(d2)
+}
+
// Equal returns whether the numbers represented by d and d2 are equal.
func (d Decimal) Equal(d2 Decimal) bool {
return d.Cmp(d2) == 0
}
-// Equals is deprecated, please use Equal method instead
+// Deprecated: Equals is deprecated, please use Equal method instead.
func (d Decimal) Equals(d2 Decimal) bool {
return d.Equal(d2)
}
@@ -905,7 +1356,6 @@ func (d Decimal) LessThanOrEqual(d2 Decimal) bool {
// -1 if d < 0
// 0 if d == 0
// +1 if d > 0
-//
func (d Decimal) Sign() int {
if d.value == nil {
return 0
@@ -968,9 +1418,7 @@ func (d Decimal) IntPart() int64 {
// BigInt returns integer component of the decimal as a BigInt.
func (d Decimal) BigInt() *big.Int {
scaledD := d.rescale(0)
- i := &big.Int{}
- i.SetString(scaledD.String(), 10)
- return i
+ return scaledD.value
}
// BigFloat returns decimal as BigFloat.
@@ -1014,13 +1462,12 @@ func (d Decimal) InexactFloat64() float64 {
//
// Example:
//
-// d := New(-12345, -3)
-// println(d.String())
+// d := New(-12345, -3)
+// println(d.String())
//
// Output:
//
-// -12.345
-//
+// -12.345
func (d Decimal) String() string {
return d.string(true)
}
@@ -1030,14 +1477,13 @@ func (d Decimal) String() string {
//
// Example:
//
-// NewFromFloat(0).StringFixed(2) // output: "0.00"
-// NewFromFloat(0).StringFixed(0) // output: "0"
-// NewFromFloat(5.45).StringFixed(0) // output: "5"
-// NewFromFloat(5.45).StringFixed(1) // output: "5.5"
-// NewFromFloat(5.45).StringFixed(2) // output: "5.45"
-// NewFromFloat(5.45).StringFixed(3) // output: "5.450"
-// NewFromFloat(545).StringFixed(-1) // output: "550"
-//
+// NewFromFloat(0).StringFixed(2) // output: "0.00"
+// NewFromFloat(0).StringFixed(0) // output: "0"
+// NewFromFloat(5.45).StringFixed(0) // output: "5"
+// NewFromFloat(5.45).StringFixed(1) // output: "5.5"
+// NewFromFloat(5.45).StringFixed(2) // output: "5.45"
+// NewFromFloat(5.45).StringFixed(3) // output: "5.450"
+// NewFromFloat(545).StringFixed(-1) // output: "550"
func (d Decimal) StringFixed(places int32) string {
rounded := d.Round(places)
return rounded.string(false)
@@ -1048,14 +1494,13 @@ func (d Decimal) StringFixed(places int32) string {
//
// Example:
//
-// NewFromFloat(0).StringFixedBank(2) // output: "0.00"
-// NewFromFloat(0).StringFixedBank(0) // output: "0"
-// NewFromFloat(5.45).StringFixedBank(0) // output: "5"
-// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4"
-// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45"
-// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450"
-// NewFromFloat(545).StringFixedBank(-1) // output: "540"
-//
+// NewFromFloat(0).StringFixedBank(2) // output: "0.00"
+// NewFromFloat(0).StringFixedBank(0) // output: "0"
+// NewFromFloat(5.45).StringFixedBank(0) // output: "5"
+// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4"
+// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45"
+// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450"
+// NewFromFloat(545).StringFixedBank(-1) // output: "540"
func (d Decimal) StringFixedBank(places int32) string {
rounded := d.RoundBank(places)
return rounded.string(false)
@@ -1073,9 +1518,8 @@ func (d Decimal) StringFixedCash(interval uint8) string {
//
// Example:
//
-// NewFromFloat(5.45).Round(1).String() // output: "5.5"
-// NewFromFloat(545).Round(-1).String() // output: "550"
-//
+// NewFromFloat(5.45).Round(1).String() // output: "5.5"
+// NewFromFloat(545).Round(-1).String() // output: "550"
func (d Decimal) Round(places int32) Decimal {
if d.exp == -places {
return d
@@ -1104,11 +1548,10 @@ func (d Decimal) Round(places int32) Decimal {
//
// Example:
//
-// NewFromFloat(545).RoundCeil(-2).String() // output: "600"
-// NewFromFloat(500).RoundCeil(-2).String() // output: "500"
-// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11"
-// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5"
-//
+// NewFromFloat(545).RoundCeil(-2).String() // output: "600"
+// NewFromFloat(500).RoundCeil(-2).String() // output: "500"
+// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11"
+// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.4"
func (d Decimal) RoundCeil(places int32) Decimal {
if d.exp >= -places {
return d
@@ -1130,11 +1573,10 @@ func (d Decimal) RoundCeil(places int32) Decimal {
//
// Example:
//
-// NewFromFloat(545).RoundFloor(-2).String() // output: "500"
-// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500"
-// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1"
-// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4"
-//
+// NewFromFloat(545).RoundFloor(-2).String() // output: "500"
+// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500"
+// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1"
+// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.5"
func (d Decimal) RoundFloor(places int32) Decimal {
if d.exp >= -places {
return d
@@ -1156,11 +1598,10 @@ func (d Decimal) RoundFloor(places int32) Decimal {
//
// Example:
//
-// NewFromFloat(545).RoundUp(-2).String() // output: "600"
-// NewFromFloat(500).RoundUp(-2).String() // output: "500"
-// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11"
-// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4"
-//
+// NewFromFloat(545).RoundUp(-2).String() // output: "600"
+// NewFromFloat(500).RoundUp(-2).String() // output: "500"
+// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11"
+// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.5"
func (d Decimal) RoundUp(places int32) Decimal {
if d.exp >= -places {
return d
@@ -1184,11 +1625,10 @@ func (d Decimal) RoundUp(places int32) Decimal {
//
// Example:
//
-// NewFromFloat(545).RoundDown(-2).String() // output: "500"
-// NewFromFloat(-500).RoundDown(-2).String() // output: "-500"
-// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1"
-// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5"
-//
+// NewFromFloat(545).RoundDown(-2).String() // output: "500"
+// NewFromFloat(-500).RoundDown(-2).String() // output: "-500"
+// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1"
+// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.4"
func (d Decimal) RoundDown(places int32) Decimal {
if d.exp >= -places {
return d
@@ -1209,13 +1649,12 @@ func (d Decimal) RoundDown(places int32) Decimal {
//
// Examples:
//
-// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4"
-// NewFromFloat(545).RoundBank(-1).String() // output: "540"
-// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5"
-// NewFromFloat(546).RoundBank(-1).String() // output: "550"
-// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6"
-// NewFromFloat(555).RoundBank(-1).String() // output: "560"
-//
+// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4"
+// NewFromFloat(545).RoundBank(-1).String() // output: "540"
+// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5"
+// NewFromFloat(546).RoundBank(-1).String() // output: "550"
+// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6"
+// NewFromFloat(555).RoundBank(-1).String() // output: "560"
func (d Decimal) RoundBank(places int32) Decimal {
round := d.Round(places)
@@ -1237,11 +1676,13 @@ func (d Decimal) RoundBank(places int32) Decimal {
// interval. The amount payable for a cash transaction is rounded to the nearest
// multiple of the minimum currency unit available. The following intervals are
// available: 5, 10, 25, 50 and 100; any other number throws a panic.
-// 5: 5 cent rounding 3.43 => 3.45
-// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up)
-// 25: 25 cent rounding 3.41 => 3.50
-// 50: 50 cent rounding 3.75 => 4.00
-// 100: 100 cent rounding 3.50 => 4.00
+//
+// 5: 5 cent rounding 3.43 => 3.45
+// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up)
+// 25: 25 cent rounding 3.41 => 3.50
+// 50: 50 cent rounding 3.75 => 4.00
+// 100: 100 cent rounding 3.50 => 4.00
+//
// For more details: https://en.wikipedia.org/wiki/Cash_rounding
func (d Decimal) RoundCash(interval uint8) Decimal {
var iVal *big.Int
@@ -1310,8 +1751,7 @@ func (d Decimal) Ceil() Decimal {
//
// Example:
//
-// decimal.NewFromString("123.456").Truncate(2).String() // "123.45"
-//
+// decimal.NewFromString("123.456").Truncate(2).String() // "123.45"
func (d Decimal) Truncate(precision int32) Decimal {
d.ensureInitialized()
if precision >= 0 && -precision > d.exp {
@@ -1373,19 +1813,18 @@ func (d *Decimal) UnmarshalBinary(data []byte) error {
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d Decimal) MarshalBinary() (data []byte, err error) {
- // Write the exponent first since it's a fixed size
- v1 := make([]byte, 4)
- binary.BigEndian.PutUint32(v1, uint32(d.exp))
-
- // Add the value
- var v2 []byte
- if v2, err = d.value.GobEncode(); err != nil {
- return
+ // exp is written first, but encode value first to know output size
+ var valueData []byte
+ if valueData, err = d.value.GobEncode(); err != nil {
+ return nil, err
}
+ // Write the exponent in front, since it's a fixed size
+ expData := make([]byte, 4, len(valueData)+4)
+ binary.BigEndian.PutUint32(expData, uint32(d.exp))
+
// Return the byte array
- data = append(v1, v2...)
- return
+ return append(expData, valueData...), nil
}
// Scan implements the sql.Scanner interface for database deserialization.
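The MarshalBinary rewrite above keeps the wire format the same, a 4-byte big-endian exponent followed by the GobEncoded coefficient; it only sizes the slice up front so the final append does not reallocate. A hedged round-trip check, assuming the vendored decimal API shown here:

package main

import (
    "encoding/binary"
    "fmt"

    "github.com/shopspring/decimal"
)

func main() {
    d := decimal.New(-12345, -3) // -12.345

    data, err := d.MarshalBinary()
    if err != nil {
        panic(err)
    }
    // The first four bytes are still the exponent, big-endian.
    fmt.Println(int32(binary.BigEndian.Uint32(data[:4]))) // -3

    var back decimal.Decimal
    if err := back.UnmarshalBinary(data); err != nil {
        panic(err)
    }
    fmt.Println(back.Equal(d)) // true
}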
@@ -1408,6 +1847,11 @@ func (d *Decimal) Scan(value interface{}) error {
*d = New(v, 0)
return nil
+ case uint64:
+ // some drivers (e.g. ClickHouse) may send the value from the db as uint64
+ *d = NewFromUint64(v)
+ return nil
+
default:
// default is trying to interpret value stored as string
str, err := unquoteIfQuoted(v)
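The new uint64 branch above lets Scan accept drivers that hand back unsigned integers directly (the comment cites ClickHouse); previously such values fell through to the string path. A hedged sketch calling Scan by hand, outside any database/sql driver:

package main

import (
    "fmt"

    "github.com/shopspring/decimal"
)

func main() {
    var d decimal.Decimal

    // Simulate a driver returning a uint64 column value.
    if err := d.Scan(uint64(18446744073709551615)); err != nil {
        panic(err)
    }
    fmt.Println(d) // 18446744073709551615, preserved exactly with no int64 overflow
}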
@@ -1455,7 +1899,8 @@ func (d *Decimal) GobDecode(data []byte) error {
}
// StringScaled first scales the decimal then calls .String() on it.
-// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead.
+//
+// Deprecated: buggy and unintuitive. Use StringFixed instead.
func (d Decimal) StringScaled(exp int32) string {
return d.rescale(exp).String()
}
@@ -1515,7 +1960,7 @@ func (d *Decimal) ensureInitialized() {
//
// To call this function with an array, you must do:
//
-// Min(arr[0], arr[1:]...)
+// Min(arr[0], arr[1:]...)
//
// This makes it harder to accidentally call Min with 0 arguments.
func Min(first Decimal, rest ...Decimal) Decimal {
@@ -1532,7 +1977,7 @@ func Min(first Decimal, rest ...Decimal) Decimal {
//
// To call this function with an array, you must do:
//
-// Max(arr[0], arr[1:]...)
+// Max(arr[0], arr[1:]...)
//
// This makes it harder to accidentally call Max with 0 arguments.
func Max(first Decimal, rest ...Decimal) Decimal {
@@ -1567,22 +2012,13 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) {
d1.ensureInitialized()
d2.ensureInitialized()
- if d1.exp == d2.exp {
- return d1, d2
+ if d1.exp < d2.exp {
+ return d1, d2.rescale(d1.exp)
+ } else if d1.exp > d2.exp {
+ return d1.rescale(d2.exp), d2
}
- baseScale := min(d1.exp, d2.exp)
- if baseScale != d1.exp {
- return d1.rescale(baseScale), d2
- }
- return d1, d2.rescale(baseScale)
-}
-
-func min(x, y int32) int32 {
- if x >= y {
- return y
- }
- return x
+ return d1, d2
}
func unquoteIfQuoted(value interface{}) (string, error) {
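RescalePair still returns both decimals rescaled to the smaller (more negative) exponent; the rewrite only drops the redundant min helper in favour of a direct three-way comparison. A hedged usage sketch:

package main

import (
    "fmt"

    "github.com/shopspring/decimal"
)

func main() {
    a := decimal.New(1234, -2) // 12.34 (exp -2)
    b := decimal.New(5, 0)     // 5     (exp 0)

    ra, rb := decimal.RescalePair(a, b)
    fmt.Println(ra, rb)                       // 12.34 5
    fmt.Println(ra.Exponent(), rb.Exponent()) // -2 -2: both now share the smaller exponent
}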
@@ -1594,8 +2030,7 @@ func unquoteIfQuoted(value interface{}) (string, error) {
case []byte:
bytes = v
default:
- return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'",
- value, value)
+ return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", value, value)
}
// If the amount is quoted, strip the quotes
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
index 120a57342..0e9e14593 100644
--- a/vendor/github.com/spf13/cast/README.md
+++ b/vendor/github.com/spf13/cast/README.md
@@ -1,8 +1,9 @@
-cast
-====
-[](https://godoc.org/github.com/spf13/cast)
-[](https://github.com/spf13/cast/actions/workflows/go.yml)
-[](https://goreportcard.com/report/github.com/spf13/cast)
+# cast
+
+[](https://github.com/spf13/cast/actions/workflows/ci.yaml)
+[](https://pkg.go.dev/mod/github.com/spf13/cast)
+
+[](https://goreportcard.com/report/github.com/spf13/cast)
Easy and safe casting from one type to another in Go
@@ -17,7 +18,7 @@ interface into a bool, etc. Cast does this intelligently when an obvious
conversion is possible. It doesn’t make any attempts to guess what you meant,
for example you can only convert a string to an int when it is a string
representation of an int such as “8”. Cast was developed for use in
-[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON
for meta data.
## Why use Cast?
@@ -72,4 +73,3 @@ the code for a complete set.
var eight interface{} = 8
cast.ToInt(eight) // 8
cast.ToInt(nil) // 0
-
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index 514d759bf..cd9c04885 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -18,6 +18,14 @@ import (
var errNegativeNotAllowed = errors.New("unable to cast negative value")
+type float64EProvider interface {
+ Float64() (float64, error)
+}
+
+type float64Provider interface {
+ Float64() float64
+}
+
// ToTimeE casts an interface to a time.Time type.
func ToTimeE(i interface{}) (tim time.Time, err error) {
return ToTimeInDefaultLocationE(i, time.UTC)
@@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) {
d, err = time.ParseDuration(s + "ns")
}
return
- case json.Number:
+ case float64EProvider:
var v float64
v, err = s.Float64()
d = time.Duration(v)
return
+ case float64Provider:
+ d = time.Duration(s.Float64())
+ return
default:
err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
return
@@ -98,10 +109,31 @@ func ToBoolE(i interface{}) (bool, error) {
case nil:
return false, nil
case int:
- if i.(int) != 0 {
- return true, nil
- }
- return false, nil
+ return b != 0, nil
+ case int64:
+ return b != 0, nil
+ case int32:
+ return b != 0, nil
+ case int16:
+ return b != 0, nil
+ case int8:
+ return b != 0, nil
+ case uint:
+ return b != 0, nil
+ case uint64:
+ return b != 0, nil
+ case uint32:
+ return b != 0, nil
+ case uint16:
+ return b != 0, nil
+ case uint8:
+ return b != 0, nil
+ case float64:
+ return b != 0, nil
+ case float32:
+ return b != 0, nil
+ case time.Duration:
+ return b != 0, nil
case string:
return strconv.ParseBool(i.(string))
case json.Number:
@@ -153,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) {
return v, nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
- case json.Number:
+ case float64EProvider:
v, err := s.Float64()
if err == nil {
return v, nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case float64Provider:
+ return s.Float64(), nil
case bool:
if s {
return 1, nil
@@ -209,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) {
return float32(v), nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
- case json.Number:
+ case float64EProvider:
v, err := s.Float64()
if err == nil {
return float32(v), nil
}
return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case float64Provider:
+ return float32(s.Float64()), nil
case bool:
if s {
return 1, nil
@@ -896,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} {
return nil
}
- var errorType = reflect.TypeOf((*error)(nil)).Elem()
- var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+ errorType := reflect.TypeOf((*error)(nil)).Elem()
+ fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
v := reflect.ValueOf(a)
for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
@@ -966,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) {
// ToStringMapStringE casts an interface to a map[string]string type.
func ToStringMapStringE(i interface{}) (map[string]string, error) {
- var m = map[string]string{}
+ m := map[string]string{}
switch v := i.(type) {
case map[string]string:
@@ -996,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) {
// ToStringMapStringSliceE casts an interface to a map[string][]string type.
func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
- var m = map[string][]string{}
+ m := map[string][]string{}
switch v := i.(type) {
case map[string][]string:
@@ -1060,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
// ToStringMapBoolE casts an interface to a map[string]bool type.
func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
- var m = map[string]bool{}
+ m := map[string]bool{}
switch v := i.(type) {
case map[interface{}]interface{}:
@@ -1085,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
// ToStringMapE casts an interface to a map[string]interface{} type.
func ToStringMapE(i interface{}) (map[string]interface{}, error) {
- var m = map[string]interface{}{}
+ m := map[string]interface{}{}
switch v := i.(type) {
case map[interface{}]interface{}:
@@ -1105,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) {
// ToStringMapIntE casts an interface to a map[string]int{} type.
func ToStringMapIntE(i interface{}) (map[string]int, error) {
- var m = map[string]int{}
+ m := map[string]int{}
if i == nil {
return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
}
@@ -1146,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) {
// ToStringMapInt64E casts an interface to a map[string]int64{} type.
func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
- var m = map[string]int64{}
+ m := map[string]int64{}
if i == nil {
return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
}
@@ -1383,37 +1419,35 @@ func (f timeFormat) hasTimezone() bool {
return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone
}
-var (
- timeFormats = []timeFormat{
- {time.RFC3339, timeFormatNumericTimezone},
- {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
- {time.RFC1123Z, timeFormatNumericTimezone},
- {time.RFC1123, timeFormatNamedTimezone},
- {time.RFC822Z, timeFormatNumericTimezone},
- {time.RFC822, timeFormatNamedTimezone},
- {time.RFC850, timeFormatNamedTimezone},
- {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
- {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
- {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
- {"2006-01-02 15:04:05", timeFormatNoTimezone},
- {time.ANSIC, timeFormatNoTimezone},
- {time.UnixDate, timeFormatNamedTimezone},
- {time.RubyDate, timeFormatNumericTimezone},
- {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
- {"2006-01-02", timeFormatNoTimezone},
- {"02 Jan 2006", timeFormatNoTimezone},
- {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
- {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
- {time.Kitchen, timeFormatTimeOnly},
- {time.Stamp, timeFormatTimeOnly},
- {time.StampMilli, timeFormatTimeOnly},
- {time.StampMicro, timeFormatTimeOnly},
- {time.StampNano, timeFormatTimeOnly},
- }
-)
+var timeFormats = []timeFormat{
+ // Keep common formats at the top.
+ {"2006-01-02", timeFormatNoTimezone},
+ {time.RFC3339, timeFormatNumericTimezone},
+ {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone
+ {time.RFC1123Z, timeFormatNumericTimezone},
+ {time.RFC1123, timeFormatNamedTimezone},
+ {time.RFC822Z, timeFormatNumericTimezone},
+ {time.RFC822, timeFormatNamedTimezone},
+ {time.RFC850, timeFormatNamedTimezone},
+ {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String()
+ {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon
+ {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon
+ {"2006-01-02 15:04:05", timeFormatNoTimezone},
+ {time.ANSIC, timeFormatNoTimezone},
+ {time.UnixDate, timeFormatNamedTimezone},
+ {time.RubyDate, timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone},
+ {"02 Jan 2006", timeFormatNoTimezone},
+ {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone},
+ {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone},
+ {time.Kitchen, timeFormatTimeOnly},
+ {time.Stamp, timeFormatTimeOnly},
+ {time.StampMilli, timeFormatTimeOnly},
+ {time.StampMicro, timeFormatTimeOnly},
+ {time.StampNano, timeFormatTimeOnly},
+}
func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) {
-
for _, format := range formats {
if d, e = time.Parse(format.format, s); e == nil {
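The json.Number cases are generalized to anything exposing Float64(), ToBoolE now covers every numeric width, and the reordered timeFormats list puts the plain date layout first. A small usage sketch (illustrative only, not part of this PR):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// json.Number satisfies the new float64EProvider interface
	// (Float64() (float64, error)), so it is still handled as before.
	d, _ := cast.ToDurationE(json.Number("1500000000"))
	fmt.Println(d) // 1.5s (numeric values are interpreted as nanoseconds)

	// ToBoolE now handles every integer and float width directly.
	fmt.Println(cast.ToBool(uint8(0)))    // false
	fmt.Println(cast.ToBool(float32(42))) // true

	// With "2006-01-02" moved to the top of timeFormats, plain dates are
	// matched without trying RFC3339 first.
	t, _ := cast.ToTimeE("2024-06-01")
	fmt.Println(t.Format("2006-01-02")) // 2024-06-01
}
```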
diff --git a/vendor/go.bytebuilders.dev/audit/lib/billing_event.go b/vendor/go.bytebuilders.dev/audit/lib/billing_event.go
index 95c3352af..40c505814 100644
--- a/vendor/go.bytebuilders.dev/audit/lib/billing_event.go
+++ b/vendor/go.bytebuilders.dev/audit/lib/billing_event.go
@@ -17,8 +17,11 @@ limitations under the License.
package lib
import (
+ "context"
+
api "go.bytebuilders.dev/audit/api/v1"
+ core "k8s.io/api/core/v1"
kmapi "kmodules.xyz/client-go/api/v1"
"kmodules.xyz/client-go/discovery"
corev1alpha1 "kmodules.xyz/resource-metadata/apis/core/v1alpha1"
@@ -28,6 +31,9 @@ import (
type BillingEventCreator struct {
Mapper discovery.ResourceMapper
ClusterMetadata *kmapi.ClusterMetadata
+ ClientBilling bool
+ PodLister client.Reader
+ PVCLister client.Reader
}
func (p *BillingEventCreator) CreateEvent(obj client.Object) (*api.Event, error) {
@@ -41,8 +47,66 @@ func (p *BillingEventCreator) CreateEvent(obj client.Object) (*api.Event, error)
return nil, err
}
+ if p.ClientBilling {
+ if r, ok := obj.(Resource); ok {
+ var podList core.PodList
+ err = p.PodLister.List(context.TODO(), &podList, client.InNamespace(obj.GetNamespace()), client.MatchingLabels(r.OffshootSelectors()))
+ if err != nil {
+ return nil, err
+ }
+ podresources := make([]corev1alpha1.ComputeResource, 0, len(podList.Items))
+ for _, pod := range podList.Items {
+ pr := corev1alpha1.ComputeResource{
+ UID: pod.UID,
+ Name: pod.Name,
+ CreationTimestamp: pod.CreationTimestamp,
+ Containers: make([]corev1alpha1.ContainerResource, 0, len(pod.Spec.Containers)),
+ InitContainers: make([]corev1alpha1.ContainerResource, 0, len(pod.Spec.InitContainers)),
+ }
+ for _, c := range pod.Spec.Containers {
+ pr.Containers = append(pr.Containers, corev1alpha1.ContainerResource{
+ Name: c.Name,
+ Resource: c.Resources,
+ RestartPolicy: c.RestartPolicy,
+ })
+ }
+ for _, c := range pod.Spec.InitContainers {
+ pr.InitContainers = append(pr.InitContainers, corev1alpha1.ContainerResource{
+ Name: c.Name,
+ Resource: c.Resources,
+ RestartPolicy: c.RestartPolicy,
+ })
+ }
+ podresources = append(podresources, pr)
+ }
+ res.Spec.Pods = podresources
+
+ var pvcList core.PersistentVolumeClaimList
+ err = p.PVCLister.List(context.TODO(), &pvcList, client.InNamespace(obj.GetNamespace()), client.MatchingLabels(r.OffshootSelectors()))
+ if err != nil {
+ return nil, err
+ }
+ pvcresources := make([]corev1alpha1.StorageResource, 0, len(pvcList.Items))
+ for _, pvc := range pvcList.Items {
+ if pvc.Status.Phase == core.ClaimBound {
+ pvcresources = append(pvcresources, corev1alpha1.StorageResource{
+ UID: pvc.UID,
+ Name: pvc.Name,
+ CreationTimestamp: pvc.CreationTimestamp,
+ Resources: pvc.Spec.Resources,
+ })
+ }
+ }
+ res.Spec.Storage = pvcresources
+ }
+ }
+
return &api.Event{
Resource: res,
ResourceID: *rid,
}, nil
}
+
+type Resource interface {
+ OffshootSelectors() map[string]string
+}
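The client-billing path only kicks in for objects that expose their workload selector through the new Resource interface. A hypothetical wrapper type (names and labels below are illustrative, not part of this PR) would satisfy it like this:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Resource mirrors the interface added above in billing_event.go.
type Resource interface {
	OffshootSelectors() map[string]string
}

// billedStatefulSet is a purely illustrative wrapper that opts a workload into
// client-side billing by exposing the labels its pods and PVCs carry.
type billedStatefulSet struct {
	appsv1.StatefulSet
}

func (b *billedStatefulSet) OffshootSelectors() map[string]string {
	return b.Spec.Selector.MatchLabels
}

func main() {
	sts := &billedStatefulSet{}
	sts.Spec.Selector = &metav1.LabelSelector{
		MatchLabels: map[string]string{"app.kubernetes.io/instance": "demo"},
	}

	// CreateEvent performs the same type assertion before listing pods and PVCs.
	var obj interface{} = sts
	if r, ok := obj.(Resource); ok {
		fmt.Println(r.OffshootSelectors()) // map[app.kubernetes.io/instance:demo]
	}
}
```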
diff --git a/vendor/go.bytebuilders.dev/audit/lib/lister.go b/vendor/go.bytebuilders.dev/audit/lib/lister.go
new file mode 100644
index 000000000..f0e6fe842
--- /dev/null
+++ b/vendor/go.bytebuilders.dev/audit/lib/lister.go
@@ -0,0 +1,136 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lib
+
+import (
+ "context"
+ "reflect"
+
+ core "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ corelisters "k8s.io/client-go/listers/core/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type PodReader struct {
+ delegate corelisters.PodLister
+}
+
+var _ client.Reader = &PodReader{}
+
+func NewPodReader(delegate corelisters.PodLister) PodReader {
+ return PodReader{delegate: delegate}
+}
+
+func (r PodReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
+ pod, err := r.delegate.Pods(key.Namespace).Get(key.Name)
+ if err != nil {
+ return err
+ }
+ assign(obj, pod)
+ return nil
+}
+
+func (r PodReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ var o client.ListOptions
+ o.ApplyOptions(opts)
+
+ var ls labels.Selector
+ if o.LabelSelector != nil {
+ ls = o.LabelSelector
+ } else if o.Raw != nil && o.Raw.LabelSelector != "" {
+ var err error
+ ls, err = labels.Parse(o.Raw.LabelSelector)
+ if err != nil {
+ return err
+ }
+ }
+
+ pods, err := r.delegate.Pods(o.Namespace).List(ls)
+ if err != nil {
+ return err
+ }
+
+ podList := core.PodList{
+ Items: make([]core.Pod, 0, len(pods)),
+ }
+ for _, pod := range pods {
+ podList.Items = append(podList.Items, *pod)
+ }
+ assign(list, podList)
+
+ return nil
+}
+
+type PersistentVolumeClaimReader struct {
+ delegate corelisters.PersistentVolumeClaimLister
+}
+
+var _ client.Reader = &PersistentVolumeClaimReader{}
+
+func NewPersistentVolumeClaimReader(delegate corelisters.PersistentVolumeClaimLister) PersistentVolumeClaimReader {
+ return PersistentVolumeClaimReader{delegate: delegate}
+}
+
+func (r PersistentVolumeClaimReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, _ ...client.GetOption) error {
+ pvc, err := r.delegate.PersistentVolumeClaims(key.Namespace).Get(key.Name)
+ if err != nil {
+ return err
+ }
+ assign(obj, pvc)
+
+ return nil
+}
+
+func (r PersistentVolumeClaimReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ var o client.ListOptions
+ o.ApplyOptions(opts)
+
+ var ls labels.Selector
+ if o.LabelSelector != nil {
+ ls = o.LabelSelector
+ } else if o.Raw != nil && o.Raw.LabelSelector != "" {
+ var err error
+ ls, err = labels.Parse(o.Raw.LabelSelector)
+ if err != nil {
+ return err
+ }
+ }
+
+ pvcs, err := r.delegate.PersistentVolumeClaims(o.Namespace).List(ls)
+ if err != nil {
+ return err
+ }
+
+ pvcList := core.PersistentVolumeClaimList{
+ Items: make([]core.PersistentVolumeClaim, 0, len(pvcs)),
+ }
+ for _, pvc := range pvcs {
+ pvcList.Items = append(pvcList.Items, *pvc)
+ }
+ assign(list, pvcList)
+
+ return nil
+}
+
+func assign(target, src any) {
+ srcValue := reflect.ValueOf(src)
+ if srcValue.Kind() == reflect.Pointer {
+ srcValue = srcValue.Elem()
+ }
+ reflect.ValueOf(target).Elem().Set(srcValue)
+}
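These readers adapt cache-backed client-go listers to controller-runtime's client.Reader, so the billing code above can list pods and PVCs without extra API calls. A hedged sketch of how a caller might wire them up (the informer-factory setup is an assumption about the caller, not shown in this PR):

```go
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"

	auditlib "go.bytebuilders.dev/audit/lib"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	kc := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(kc, 10*time.Minute)
	podLister := factory.Core().V1().Pods().Lister()
	pvcLister := factory.Core().V1().PersistentVolumeClaims().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// The readers satisfy client.Reader, so they drop straight into the
	// new BillingEventCreator fields.
	creator := auditlib.BillingEventCreator{
		ClientBilling: true,
		PodLister:     auditlib.NewPodReader(podLister),
		PVCLister:     auditlib.NewPersistentVolumeClaimReader(pvcLister),
	}
	_ = creator
}
```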
diff --git a/vendor/go.bytebuilders.dev/audit/lib/nats.go b/vendor/go.bytebuilders.dev/audit/lib/nats.go
index fb68966d9..60c8b1d1b 100644
--- a/vendor/go.bytebuilders.dev/audit/lib/nats.go
+++ b/vendor/go.bytebuilders.dev/audit/lib/nats.go
@@ -24,6 +24,7 @@ import (
"io"
"net/http"
"os"
+ "strings"
"sync"
"time"
@@ -45,10 +46,8 @@ const (
)
type NatsConfig struct {
- // LicenseID string `json:"licenseID"`
- Subject string `json:"natsSubject"`
- Server string `json:"natsServer"`
- Client *nats.Conn `json:"-"`
+ Subject string `json:"natsSubject"`
+ Server string `json:"natsServer"`
}
// NatsCredential represents the api response of the register licensed user api
@@ -57,79 +56,138 @@ type NatsCredential struct {
Credential []byte `json:"credential"`
}
-type LicenseIDGetter interface {
- GetLicenseID() string
-}
+type NatsClient struct {
+ cfg *rest.Config
+ clusterID string
+ LicenseFile string
+
+ le *kubernetes.LicenseEnforcer
+ l *v1alpha1.License
-type LicenseUpdater struct {
- le *kubernetes.LicenseEnforcer
- License v1alpha1.License
+ nc *nats.Conn
+ Subject string
+ Server string
mu sync.Mutex
}
-func (lu *LicenseUpdater) GetLicenseID() string {
- lu.mu.Lock()
- defer lu.mu.Unlock()
+func NewNatsClient(cfg *rest.Config, clusterID string, LicenseFile string) *NatsClient {
+ return &NatsClient{
+ cfg: cfg,
+ clusterID: clusterID,
+ LicenseFile: LicenseFile,
+ }
+}
+
+func (c *NatsClient) Request(data []byte, timeout time.Duration) (*nats.Msg, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ var justConnected bool
+ if c.nc == nil {
+ if err := c.connect(); err != nil {
+ return nil, err
+ }
+ justConnected = true
+ }
+ msg, err := c.nc.Request(c.Subject, data, timeout)
+ if err != nil && !justConnected && isNatsAuthError(err.Error()) {
+ if err := c.connect(); err != nil {
+ return nil, err
+ }
+ msg, err = c.nc.Request(c.Subject, data, timeout)
+ }
+ return msg, err
+}
+
+// src: https://github.com/nats-io/nats.go/blob/main/nats.go#L3693-L3709
+func isNatsAuthError(e string) bool {
+ if strings.HasPrefix(e, nats.AUTHORIZATION_ERR) {
+ return true
+ }
+ if strings.HasPrefix(e, nats.AUTHENTICATION_EXPIRED_ERR) {
+ return true
+ }
+ if strings.HasPrefix(e, nats.AUTHENTICATION_REVOKED_ERR) {
+ return true
+ }
+ if strings.HasPrefix(e, nats.ACCOUNT_AUTHENTICATION_EXPIRED_ERR) {
+ return true
+ }
+ return false
+}
+
+func (c *NatsClient) GetLicenseID() (string, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.l == nil {
+ if err := c.connect(); err != nil {
+ return "", err
+ }
+ }
- l := lu.License
- if l.Status == v1alpha1.LicenseActive && time.Now().After(l.NotAfter.Time) {
- license, _ := lu.le.LoadLicense()
- lu.License = license
- l = license
+ if c.l.Status == v1alpha1.LicenseActive && time.Now().After(c.l.NotAfter.Time) {
+ license, _ := c.le.LoadLicense()
+ c.l = &license
}
- return l.ID
+ return c.l.ID, nil
}
-func NewNatsConfig(cfg *rest.Config, clusterID string, LicenseFile string) (*NatsConfig, LicenseIDGetter, error) {
- le, err := kubernetes.NewLicenseEnforcer(cfg, LicenseFile)
+func (c *NatsClient) connect() error {
+ le, err := kubernetes.NewLicenseEnforcer(c.cfg, c.LicenseFile)
if err != nil {
- return nil, nil, err
+ return err
}
license, licenseBytes := le.LoadLicense()
if license.Status != v1alpha1.LicenseActive {
- return nil, nil, fmt.Errorf("license status is %s", license.Status)
+ return fmt.Errorf("license status is %s", license.Status)
}
opts := verifier.Options{
- ClusterUID: clusterID,
+ ClusterUID: c.clusterID,
Features: info.ProductName,
CACert: []byte(info.LicenseCA),
License: licenseBytes,
}
data, err := json.Marshal(opts)
if err != nil {
- return nil, nil, err
+ return err
}
resp, err := http.Post(info.MustRegistrationAPIEndpoint(), "application/json", bytes.NewReader(data))
if err != nil {
- return nil, nil, err
+ return err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
- return nil, nil, err
+ return err
}
if resp.StatusCode != http.StatusOK {
- return nil, nil, errors.New(resp.Status + ", " + string(body))
+ return errors.New(resp.Status + ", " + string(body))
}
var natscred NatsCredential
err = json.Unmarshal(body, &natscred)
if err != nil {
- return nil, nil, err
+ return err
}
klog.V(5).InfoS("using event receiver", "address", natscred.Server, "subject", natscred.Subject, "licenseID", license.ID)
- natscred.Client, err = NewConnection(license.ID, natscred)
+ nc, err := NewConnection(license.ID, natscred)
if err != nil {
- return nil, nil, err
+ return err
}
- return &natscred.NatsConfig, &LicenseUpdater{le: le, License: license}, nil
+ c.le = le
+ c.l = &license
+ c.nc = nc
+ c.Subject = natscred.Subject
+ c.Server = natscred.Server
+ return nil
}
// NewConnection creates a new NATS connection
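The NatsConfig/LicenseUpdater pair is folded into a single NatsClient that connects lazily on first use and re-dials when NATS reports an authorization or expiry error (e.g. after a license refresh). A usage sketch; the license file path and cluster UID below are placeholders:

```go
package main

import (
	"fmt"
	"time"

	auditlib "go.bytebuilders.dev/audit/lib"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// The client does not dial until the first Request/GetLicenseID call and
	// transparently reconnects if the NATS credential expires.
	nc := auditlib.NewNatsClient(cfg, "cluster-uid", "/var/run/license/license.txt")

	licenseID, err := nc.GetLicenseID()
	if err != nil {
		panic(err)
	}
	fmt.Println("license:", licenseID)

	if _, err := nc.Request([]byte(`{"ping":true}`), 5*time.Second); err != nil {
		fmt.Println("request failed:", err)
	}
}
```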
diff --git a/vendor/go.bytebuilders.dev/audit/lib/publisher.go b/vendor/go.bytebuilders.dev/audit/lib/publisher.go
index ae20db2c8..a5a58ef85 100644
--- a/vendor/go.bytebuilders.dev/audit/lib/publisher.go
+++ b/vendor/go.bytebuilders.dev/audit/lib/publisher.go
@@ -18,6 +18,7 @@ package lib
import (
"context"
+ "errors"
"fmt"
gosync "sync"
"time"
@@ -27,9 +28,7 @@ import (
cloudeventssdk "github.com/cloudevents/sdk-go/v2"
"github.com/cloudevents/sdk-go/v2/binding/format"
cloudevents "github.com/cloudevents/sdk-go/v2/event"
- "github.com/nats-io/nats.go"
"go.bytebuilders.dev/license-verifier/info"
- "gomodules.xyz/sync"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -62,11 +61,7 @@ type Informer interface {
type EventCreator func(obj client.Object) (*api.Event, error)
type EventPublisher struct {
- once sync.Once
- connect func() error
-
- nats *NatsConfig
- lu LicenseIDGetter
+ c *NatsClient
mapper discovery.ResourceMapper
createEvent EventCreator
@@ -75,47 +70,15 @@ type EventPublisher struct {
}
func NewEventPublisher(
- nats *NatsConfig,
+ c *NatsClient,
mapper discovery.ResourceMapper,
fn EventCreator,
) *EventPublisher {
- p := &EventPublisher{
+ return &EventPublisher{
+ c: c,
mapper: mapper,
createEvent: fn,
}
- p.connect = func() error {
- p.nats = nats
- return nil
- }
- return p
-}
-
-func NewResilientEventPublisher(
- fnConnect func() (*NatsConfig, LicenseIDGetter, error),
- mapper discovery.ResourceMapper,
- fnCreateEvent EventCreator,
-) *EventPublisher {
- p := &EventPublisher{
- mapper: mapper,
- createEvent: fnCreateEvent,
- }
- p.connect = func() error {
- var err error
- p.nats, p.lu, err = fnConnect()
- if err != nil {
- klog.V(5).InfoS("failed to connect with event receiver", "error", err)
- }
- return err
- }
- return p
-}
-
-func (p *EventPublisher) NatsClient() (*nats.Conn, error) {
- p.once.Do(p.connect)
- if p.nats == nil {
- return nil, fmt.Errorf("not connected to nats")
- }
- return p.nats.Client, nil
}
func (p *EventPublisher) Publish(ev *api.Event, et api.EventType) error {
@@ -145,7 +108,7 @@ func (p *EventPublisher) Publish(ev *api.Event, et api.EventType) error {
defer cancel()
for {
- _, err = p.nats.Client.Request(p.nats.Subject, data, natsRequestTimeout)
+ _, err = p.c.Request(data, natsRequestTimeout)
if err == nil {
cancel()
} else {
@@ -154,10 +117,10 @@ func (p *EventPublisher) Publish(ev *api.Event, et api.EventType) error {
select {
case <-ctx.Done():
- if ctx.Err() == context.DeadlineExceeded {
+ if errors.Is(ctx.Err(), context.DeadlineExceeded) {
klog.V(5).Infof("failed to send event : %s", string(data))
- } else if ctx.Err() == context.Canceled {
- klog.V(5).Infof("Published event `%s` to channel `%s` and acknowledged", et, p.nats.Subject)
+ } else if errors.Is(ctx.Err(), context.Canceled) {
+ klog.V(5).Infof("Published event `%s` to channel `%s` and acknowledged", et, p.c.Subject)
}
return nil
default:
@@ -183,14 +146,8 @@ func (p *EventPublisher) ForGVK(informer Informer, gvk schema.GroupVersionKind)
if err != nil {
return nil, err
}
-
- p.once.Do(p.connect)
- if p.nats == nil {
- return nil, fmt.Errorf("not connected to nats")
- }
- ev.LicenseID = p.lu.GetLicenseID()
-
- return ev, nil
+ ev.LicenseID, err = p.c.GetLicenseID()
+ return ev, err
},
}
_, _ = informer.AddEventHandlerWithResyncPeriod(h, eventInterval)
@@ -255,12 +212,10 @@ func (p *EventPublisher) setupSiteInfoPublisher(cfg *rest.Config, kc kubernetes.
identitylib.RefreshNodeStats(p.si, nodes)
p.siMutex.Unlock()
- p.once.Do(p.connect)
- if p.nats == nil {
- return nil, fmt.Errorf("not connected to nats")
+ licenseID, err := p.c.GetLicenseID()
+ if err != nil {
+ return nil, err
}
-
- licenseID := p.lu.GetLicenseID()
p.si.Product.LicenseID = licenseID
p.si.Name = fmt.Sprintf("%s.%s", licenseID, p.si.Product.ProductName)
ev := &api.Event{
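With the resilient/lazy connect logic gone, the publisher takes the NatsClient directly and fetches the license ID on demand. A compile-level sketch of the new wiring, with the mapper and cluster metadata supplied by the caller:

```go
package example

import (
	auditlib "go.bytebuilders.dev/audit/lib"
	kmapi "kmodules.xyz/client-go/api/v1"
	"kmodules.xyz/client-go/discovery"
)

// buildPublisher shows the new wiring: the NatsClient is injected directly and
// the publisher fetches the license ID through it instead of a LicenseIDGetter.
func buildPublisher(nc *auditlib.NatsClient, mapper discovery.ResourceMapper, cm *kmapi.ClusterMetadata) *auditlib.EventPublisher {
	creator := &auditlib.BillingEventCreator{
		Mapper:          mapper,
		ClusterMetadata: cm,
	}
	return auditlib.NewEventPublisher(nc, mapper, creator.CreateEvent)
}
```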
diff --git a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/helper.go b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/helper.go
index 0221b461b..0be4d2a6b 100644
--- a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/helper.go
+++ b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/helper.go
@@ -17,7 +17,11 @@ limitations under the License.
package v1alpha1
func (l License) DisableAnalytics() bool {
- return len(l.FeatureFlags) > 0 && l.FeatureFlags["DisableAnalytics"] == "true"
+ return len(l.FeatureFlags) > 0 && l.FeatureFlags[FeatureDisableAnalytics] == "true"
+}
+
+func (l License) EnableClientBilling() bool {
+ return len(l.FeatureFlags) > 0 && l.FeatureFlags[FeatureEnableClientBilling] == "true"
}
func (i *License) Less(j *License) bool {
diff --git a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/types.go b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/types.go
index eb8d56217..bb8adf577 100644
--- a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/types.go
+++ b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/types.go
@@ -17,7 +17,11 @@ limitations under the License.
package v1alpha1
import (
+ "fmt"
+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -26,20 +30,20 @@ import (
type License struct {
metav1.TypeMeta `json:",inline,omitempty"`
- Data []byte `json:"-"`
- Issuer string `json:"issuer,omitempty"` // byte.builders
- ProductLine string `json:"productLine,omitempty"`
- TierName string `json:"tierName,omitempty"`
- PlanName string `json:"planName,omitempty"`
- Features []string `json:"features,omitempty"`
- FeatureFlags map[string]string `json:"featureFlags,omitempty"`
- Clusters []string `json:"clusters,omitempty"` // cluster_id ?
- User *User `json:"user,omitempty"`
- NotBefore *metav1.Time `json:"notBefore,omitempty"` // start of subscription start
- NotAfter *metav1.Time `json:"notAfter,omitempty"` // if set, use this
- ID string `json:"id,omitempty"` // license ID
- Status LicenseStatus `json:"status"`
- Reason string `json:"reason"`
+ Data []byte `json:"-"`
+ Issuer string `json:"issuer,omitempty"` // byte.builders
+ ProductLine string `json:"productLine,omitempty"`
+ TierName string `json:"tierName,omitempty"`
+ PlanName string `json:"planName,omitempty"`
+ Features []string `json:"features,omitempty"`
+ FeatureFlags FeatureFlags `json:"featureFlags,omitempty"`
+ Clusters []string `json:"clusters,omitempty"` // cluster_id ?
+ User *User `json:"user,omitempty"`
+ NotBefore *metav1.Time `json:"notBefore,omitempty"` // start of subscription start
+ NotAfter *metav1.Time `json:"notAfter,omitempty"` // if set, use this
+ ID string `json:"id,omitempty"` // license ID
+ Status LicenseStatus `json:"status"`
+ Reason string `json:"reason"`
}
type User struct {
@@ -62,3 +66,36 @@ type Contract struct {
StartTimestamp metav1.Time `json:"startTimestamp"`
ExpiryTimestamp metav1.Time `json:"expiryTimestamp"`
}
+
+type FeatureFlag string
+
+const (
+ FeatureDisableAnalytics FeatureFlag = "DisableAnalytics"
+ FeatureRestrictions FeatureFlag = "Restrictions"
+ FeatureEnableClientBilling FeatureFlag = "EnableClientBilling"
+)
+
+var knownFlags = sets.New[FeatureFlag](FeatureDisableAnalytics, FeatureRestrictions, FeatureEnableClientBilling)
+
+type FeatureFlags map[FeatureFlag]string
+
+func (f FeatureFlags) IsValid() error {
+ var errs []error
+ for k := range f {
+ if !knownFlags.Has(k) {
+ errs = append(errs, fmt.Errorf("unknown feature flag %q", k))
+ }
+ }
+ return errors.NewAggregate(errs)
+}
+
+func (f FeatureFlags) ToSlice() []string {
+ if len(f) == 0 {
+ return nil
+ }
+ result := make([]string, 0, len(f))
+ for k, v := range f {
+ result = append(result, fmt.Sprintf("%s=%s", k, v))
+ }
+ return result
+}
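FeatureFlags is now a typed, validated map, which is what backs the new EnableClientBilling helper in helper.go. A short sketch of the intended use:

```go
package main

import (
	"fmt"

	"go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1"
)

func main() {
	flags := v1alpha1.FeatureFlags{
		v1alpha1.FeatureEnableClientBilling: "true",
		"TotallyUnknownFlag":                "x",
	}

	// IsValid aggregates one error per unknown flag name.
	if err := flags.IsValid(); err != nil {
		fmt.Println("invalid:", err) // unknown feature flag "TotallyUnknownFlag"
	}

	// ToSlice renders "key=value" pairs, e.g. for embedding in a certificate's
	// Locality field, which is where ParseLicense reads them back from.
	fmt.Println(flags.ToSlice())

	lic := v1alpha1.License{FeatureFlags: v1alpha1.FeatureFlags{
		v1alpha1.FeatureEnableClientBilling: "true",
	}}
	fmt.Println(lic.EnableClientBilling()) // true
}
```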
diff --git a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/zz_generated.deepcopy.go b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/zz_generated.deepcopy.go
index a191107c0..9ba4b716c 100644
--- a/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1/zz_generated.deepcopy.go
@@ -43,6 +43,28 @@ func (in *Contract) DeepCopy() *Contract {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in FeatureFlags) DeepCopyInto(out *FeatureFlags) {
+ {
+ in := &in
+ *out = make(FeatureFlags, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureFlags.
+func (in FeatureFlags) DeepCopy() FeatureFlags {
+ if in == nil {
+ return nil
+ }
+ out := new(FeatureFlags)
+ in.DeepCopyInto(out)
+ return *out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *License) DeepCopyInto(out *License) {
*out = *in
@@ -59,7 +81,7 @@ func (in *License) DeepCopyInto(out *License) {
}
if in.FeatureFlags != nil {
in, out := &in.FeatureFlags, &out.FeatureFlags
- *out = make(map[string]string, len(*in))
+ *out = make(FeatureFlags, len(*in))
for key, val := range *in {
(*out)[key] = val
}
diff --git a/vendor/go.bytebuilders.dev/license-verifier/lib.go b/vendor/go.bytebuilders.dev/license-verifier/lib.go
index 5f8c107ce..34c62fd3d 100644
--- a/vendor/go.bytebuilders.dev/license-verifier/lib.go
+++ b/vendor/go.bytebuilders.dev/license-verifier/lib.go
@@ -115,11 +115,11 @@ func ParseLicense(opts ParserOptions) (v1alpha1.License, error) {
license.TierName = parts[1]
}
}
- license.FeatureFlags = map[string]string{}
+ license.FeatureFlags = v1alpha1.FeatureFlags{}
for _, ff := range cert.Subject.Locality {
parts := strings.SplitN(ff, "=", 2)
if len(parts) == 2 {
- license.FeatureFlags[parts[0]] = parts[1]
+ license.FeatureFlags[v1alpha1.FeatureFlag(parts[0])] = parts[1]
}
}
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index 59b3a95a7..000000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// https://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
-// representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the RFC
-// 8032 private key as the “seed”.
-//
-// This package is a wrapper around the standard library crypto/ed25519 package.
-package ed25519
-
-import (
- "crypto/ed25519"
- "io"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-// PublicKey is the type of Ed25519 public keys.
-//
-// This type is an alias for crypto/ed25519's PublicKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PublicKey = ed25519.PublicKey
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-//
-// This type is an alias for crypto/ed25519's PrivateKey type.
-// See the crypto/ed25519 package for the methods on this type.
-type PrivateKey = ed25519.PrivateKey
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- return ed25519.GenerateKey(rand)
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- return ed25519.NewKeyFromSeed(seed)
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- return ed25519.Sign(privateKey, message)
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
- return ed25519.Verify(publicKey, message, sig)
-}
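The removed file was only a thin alias layer over the standard library, so migration is an import-path change; functions, constants, and types keep their names. A minimal sketch using crypto/ed25519 directly:

```go
package main

import (
	"crypto/ed25519" // replaces golang.org/x/crypto/ed25519; the types were aliases
	"crypto/rand"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```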
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index 5e8158bba..46ceac343 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S {
return s
}
+// clearSlice sets all elements up to the length of s to the zero value of E.
+// We may use the builtin clear func instead, and remove clearSlice, when upgrading
+// to Go 1.21+.
+func clearSlice[S ~[]E, E any](s S) {
+ var zero E
+ for i := range s {
+ s[i] = zero
+ }
+}
+
// Delete removes the elements s[i:j] from s, returning the modified slice.
-// Delete panics if s[i:j] is not a valid slice of s.
-// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
-// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
-// elements contain pointers you might consider zeroing those elements so that
-// objects they reference can be garbage collected.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j] // bounds check
+ _ = s[i:j:len(s)] // bounds check
- return append(s[:i], s[j:]...)
+ if i == j {
+ return s
+ }
+
+ oldlen := len(s)
+ s = append(s[:i], s[j:]...)
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+ return s
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
-// When DeleteFunc removes m elements, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage
-// collected.
+// DeleteFunc zeroes the elements between the new length and the original length.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i := IndexFunc(s, del)
if i == -1 {
@@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
_ = s[i:j] // verify that i:j is a valid subslice
@@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
if i+len(v) != j {
copy(r[i+len(v):], s[j:])
}
+ clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
return r
}
@@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S {
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
-// When Compact discards m elements in total, it might not modify the elements
-// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
-// zeroing those elements so that objects they reference can be garbage collected.
+// Compact zeroes the elements between the new length and the original length.
func Compact[S ~[]E, E comparable](s S) S {
if len(s) < 2 {
return s
@@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
if len(s) < 2 {
return s
@@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
i++
}
}
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
return s[:i]
}
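The practical effect of the new clearSlice calls is that Delete, DeleteFunc, Replace, and Compact no longer leave stale pointers in the unused tail, so the objects they referenced can be garbage-collected. A small illustration:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	a, b, c := new(int), new(int), new(int)
	s := []*int{a, b, c}

	// Delete now zeroes the slots between the new and the old length, so the
	// trailing slot no longer pins the removed *int for the garbage collector.
	s = slices.Delete(s, 0, 1)

	fmt.Println(len(s), cap(s))   // 2 3
	fmt.Println(s[:cap(s)][2])    // <nil> (previously this slot still held a pointer)
}
```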
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 000000000..2ef36bbcf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ msgBytes, err := o.MarshalOptions.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+ sizeWritten, err := w.Write(sizeBytes)
+ if err != nil {
+ return sizeWritten, err
+ }
+ msgWritten, err := w.Write(msgBytes)
+ if err != nil {
+ return sizeWritten + msgWritten, err
+ }
+ return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+ proto.UnmarshalOptions
+
+ // MaxSize is the maximum size in wire-format bytes of a single message.
+ // Unmarshaling a message larger than MaxSize will return an error.
+ // A zero MaxSize will default to 4 MiB.
+ // Setting MaxSize to -1 disables the limit.
+ MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+ // Size is the varint size of the message encountered
+ // that was larger than the provided MaxSize.
+ Size uint64
+
+ // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+ MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+ return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The error is [io.EOF] error only if no bytes are read.
+// If an EOF happens after reading some but not all the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
+// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+ var sizeArr [binary.MaxVarintLen64]byte
+ sizeBuf := sizeArr[:0]
+ for i := range sizeArr {
+ b, err := r.ReadByte()
+ if err != nil {
+ // Immediate EOF is unexpected.
+ if err == io.EOF && i != 0 {
+ break
+ }
+ return err
+ }
+ sizeBuf = append(sizeBuf, b)
+ if b < 0x80 {
+ break
+ }
+ }
+ size, n := protowire.ConsumeVarint(sizeBuf)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+
+ maxSize := o.MaxSize
+ if maxSize == 0 {
+ maxSize = defaultMaxSize
+ }
+ if maxSize != -1 && size > uint64(maxSize) {
+ return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+ }
+
+ var b []byte
+ var err error
+ if br, ok := r.(*bufio.Reader); ok {
+ // Use the []byte from the bufio.Reader instead of having to allocate one.
+ // This reduces CPU usage and allocated bytes.
+ b, err = br.Peek(int(size))
+ if err == nil {
+ defer br.Discard(int(size))
+ } else {
+ b = nil
+ }
+ }
+ if b == nil {
+ b = make([]byte, size)
+ _, err = io.ReadFull(r, b)
+ }
+
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ if err != nil {
+ return err
+ }
+ if err := o.Unmarshal(b, m); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+ return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
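protodelim is a newly vendored package (pulled in transitively with the protobuf bump); a quick round-trip sketch using a well-known wrapper type shows the varint size-delimited framing:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var buf bytes.Buffer

	// Each message is written with a varint length prefix.
	for _, s := range []string{"first", "second"} {
		if _, err := protodelim.MarshalTo(&buf, wrapperspb.String(s)); err != nil {
			panic(err)
		}
	}

	// UnmarshalFrom consumes one size-delimited message per call; a bare io.EOF
	// is only returned when no bytes were read, which ends the loop cleanly.
	r := bufio.NewReader(&buf)
	for {
		var msg wrapperspb.StringValue
		if err := protodelim.UnmarshalFrom(r, &msg); err != nil {
			break
		}
		fmt.Println(msg.GetValue())
	}
}
```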
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index f47902371..bb2966e3b 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -102,7 +102,7 @@ type decoder struct {
}
// newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error {
}
// syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 3f75098b6..29846df22 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -25,15 +25,17 @@ const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in JSON format using default options.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -110,8 +112,9 @@ type MarshalOptions struct {
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "" // invalid syntax, but okay since this is for debugging
@@ -122,8 +125,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
}
// Marshal marshals the given [proto.Message] in the JSON format using options in
-// MarshalOptions. Do not depend on the output being stable. It may change over
-// time across different versions of the program.
+// MarshalOptions. Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index a45f112bc..24bc98ac4 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -84,7 +84,7 @@ type decoder struct {
}
// newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error {
}
// syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 95967e811..1f57e6610 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -27,15 +27,17 @@ const defaultIndent = " "
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in textproto format using default
-// options. Do not depend on the output being stable. It may change over time
-// across different versions of the program.
+// options. Do not depend on the output being stable. Its output will change
+// across different builds of your program, even when using the same version of
+// the protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -84,8 +86,9 @@ type MarshalOptions struct {
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "" // invalid syntax, but okay since this is for debugging
@@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string {
}
// Marshal writes the given [proto.Message] in textproto format using options in
-// MarshalOptions object. Do not depend on the output being stable. It may
-// change over time across different versions of the program.
+// MarshalOptions object. Do not depend on the output being stable. Its output
+// will change across different builds of your program, even when using the
+// same version of the protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index a45625c8d..87e46bd4d 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+ {rv.MethodByName("IsClosed"), "IsClosed"},
}...)
case protoreflect.EnumValueDescriptor:
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 18f075687..ff6a38360 100644
Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
new file mode 100644
index 000000000..029a6a12d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package editionssupport defines constants for editions that are supported.
+package editionssupport
+
+import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+
+const (
+ Minimum = descriptorpb.Edition_EDITION_PROTO2
+ Maximum = descriptorpb.Edition_EDITION_2023
+)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
index d2b3ac031..ea1d3e65a 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) {
// newSyntaxError returns an error with line and column information useful for
// syntax errors.
-func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(pos)
return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 373d20837..7e87c7604 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0))
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
+ f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
- f.L1.HasPacked = true
- f.L1.IsPacked = true
+ f.L1.EditionFeatures.IsPacked = true
case strings.HasPrefix(s, "weak="):
f.L1.IsWeak = true
f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 87853e786..099b2bf45 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token {
// newSyntaxError returns a syntax error with line and column information for
// current position.
-func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
index 20c17b35e..c2d6bd526 100644
--- a/vendor/google.golang.org/protobuf/internal/errors/errors.go
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -17,7 +17,7 @@ var Error = errors.New("protobuf error")
// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
-func New(f string, x ...interface{}) error {
+func New(f string, x ...any) error {
return &prefixError{s: format(f, x...)}
}
@@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error {
// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
-func Wrap(err error, f string, x ...interface{}) error {
+func Wrap(err error, f string, x ...any) error {
return &wrapError{
s: format(f, x...),
err: err,
@@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool {
return target == Error
}
-func format(f string, x ...interface{}) string {
+func format(f string, x ...any) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
@@ -87,3 +87,18 @@ func InvalidUTF8(name string) error {
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}
+
+type SizeMismatchError struct {
+ Calculated, Measured int
+}
+
+func (e *SizeMismatchError) Error() string {
+ return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
+}
+
+func MismatchedSizeCalculation(calculated, measured int) error {
+ return &SizeMismatchError{
+ Calculated: calculated,
+ Measured: measured,
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 8826bcf40..df53ff40b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -7,6 +7,7 @@ package filedesc
import (
"bytes"
"fmt"
+ "strings"
"sync"
"sync/atomic"
@@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
-func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool { return false }
+
+// Not exported and just used to reconstruct the original FileDescriptor proto
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
+func (ed *Enum) IsClosed() bool {
+ return !ed.L1.EditionFeatures.IsOpenEnum
+}
func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
@@ -251,10 +258,6 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
- HasPacked bool // promoted from google.protobuf.FieldOptions
- IsPacked bool // promoted from google.protobuf.FieldOptions
- HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
- EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool {
if fd.L1.Cardinality == protoreflect.Repeated {
return false
}
- explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
- return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
+ return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
func (fd *Field) HasOptionalKeyword() bool {
return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
@@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool {
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
- return fd.L1.EditionFeatures.IsPacked
- }
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
- // proto3 repeated fields are packed by default.
- return !fd.L1.HasPacked || fd.L1.IsPacked
- }
- return fd.L1.IsPacked
+ return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
@@ -388,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor {
}
return fd.L1.Message
}
+func (fd *Field) IsMapEntry() bool {
+ parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
+ return ok && parent.IsMapEntry()
+}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
@@ -399,13 +398,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
- return fd.L1.EditionFeatures.IsUTF8Validated
- }
- if fd.L1.HasEnforceUTF8 {
- return fd.L1.EnforceUTF8
- }
- return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
+ return fd.L1.EditionFeatures.IsUTF8Validated
}
func (od *Oneof) IsSynthetic() bool {
@@ -438,7 +431,6 @@ type (
Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
@@ -461,7 +453,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi
func (xd *Extension) HasOptionalKeyword() bool {
return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
}
-func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsPacked() bool {
+ if xd.L1.Cardinality != protoreflect.Repeated {
+ return false
+ }
+ switch xd.L1.Kind {
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
+ return false
+ }
+ return xd.L1.EditionFeatures.IsPacked
+}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
@@ -542,8 +543,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {}
// Surrogate files can be used to create standalone descriptors
// where the syntax is only information derived from the parent file.
var (
- SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
- SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+ SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+ SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
)
type (
@@ -585,6 +587,34 @@ func (s *stringName) InitJSON(name string) {
s.nameJSON = name
}
+// Returns true if this field is structured like the synthetic field of a proto2
+// group. This allows us to expand our treatment of delimited fields without
+// breaking proto2 files that have been upgraded to editions.
+func isGroupLike(fd protoreflect.FieldDescriptor) bool {
+ // Groups are always group types.
+ if fd.Kind() != protoreflect.GroupKind {
+ return false
+ }
+
+ // Group fields are always the lowercase type name.
+ if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
+ return false
+ }
+
+ // Groups could only be defined in the same file they're used.
+ if fd.Message().ParentFile() != fd.ParentFile() {
+ return false
+ }
+
+ // Group messages are always defined in the same scope as the field. File
+ // level extensions will compare NULL == NULL here, which is why the file
+ // comparison above is necessary to ensure both come from the same file.
+ if fd.IsExtension() {
+ return fd.Parent() == fd.Message().Parent()
+ }
+ return fd.ContainingMessage() == fd.Message().Parent()
+}
+
func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
@@ -605,7 +635,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
// Format the text name.
s.nameText = string(fd.Name())
- if fd.Kind() == protoreflect.GroupKind {
+ if isGroupLike(fd) {
s.nameText = string(fd.Message().Name())
}
}
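Note: the new isGroupLike helper above only treats a field as group-encoded when it is a GroupKind field whose name is the lowercased message name, declared in the same file and scope. A small sketch of just the naming half of that check, with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// looksGroupLike applies the naming condition used by isGroupLike above:
// a proto2-style group field is named after its message type, lowercased.
// (Illustrative only; the real check also compares files and scopes.)
func looksGroupLike(fieldName, messageName string) bool {
	return strings.ToLower(messageName) == fieldName
}

func main() {
	fmt.Println(looksGroupLike("mygroup", "MyGroup"))    // true
	fmt.Println(looksGroupLike("my_field", "MyMessage")) // false
}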
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 237e64fd2..8a57d60b0 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) {
switch string(v) {
case "proto2":
fd.L1.Syntax = protoreflect.Proto2
+ fd.L1.Edition = EditionProto2
case "proto3":
fd.L1.Syntax = protoreflect.Proto3
+ fd.L1.Edition = EditionProto3
case "editions":
fd.L1.Syntax = protoreflect.Editions
default:
@@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) {
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = protoreflect.Proto2
+ fd.L1.Edition = EditionProto2
}
- if fd.L1.Syntax == protoreflect.Editions {
- fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
- }
+ fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
// Parse editions features from options if any
if options != nil {
@@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
+ ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())
var numValues int
for b := b; len(b) > 0; {
@@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
+ xd.L1.EditionFeatures = featuresFromParentDesc(pd)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+ case genid.FieldDescriptorProto_Options_field_number:
+ xd.unmarshalOptions(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+
+ if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
+ xd.L1.Kind = protoreflect.GroupKind
+ }
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Packed_field_number:
+ xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Features_field_number:
+ xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@@ -499,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
}
var nameBuilderPool = sync.Pool{
- New: func() interface{} { return new(strs.Builder) },
+ New: func() any { return new(strs.Builder) },
}
func getBuilder() *strs.Builder {
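Note: unmarshalSeed above now maps proto2 and proto3 files onto the pseudo-editions EditionProto2/EditionProto3 so that EditionFeatures can be resolved uniformly for every file. A sketch of that mapping, assuming the enum values shown in the genid hunk further below (998, 999):

package main

import "fmt"

// Edition is a hypothetical stand-in for the internal Edition type.
type Edition int

const (
	EditionUnknown Edition = 0
	EditionProto2  Edition = 998
	EditionProto3  Edition = 999
	Edition2023    Edition = 1000
)

// editionForSyntax mirrors the unmarshalSeed change: proto2 and proto3 are
// treated as pseudo-editions so feature defaults resolve the same way for
// every file, editions-based or not.
func editionForSyntax(syntax string) Edition {
	switch syntax {
	case "proto3":
		return EditionProto3
	case "proto2", "": // a missing syntax is assumed to be proto2
		return EditionProto2
	default:
		return EditionUnknown
	}
}

func main() {
	fmt.Println(editionForSyntax(""))       // 998
	fmt.Println(editionForSyntax("proto3")) // 999
}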
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 482a61cc1..e56c91a8d 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -45,6 +45,11 @@ func (file *File) resolveMessages() {
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
+ if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
+ // A map field might inherit delimited encoding from a file-wide default feature.
+ // But maps never actually use delimited encoding. (At least for now...)
+ fd.L1.Kind = protoreflect.MessageKind
+ }
}
// Default is resolved here since it depends on Enum being resolved.
@@ -466,10 +471,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
b = b[m:]
}
}
- if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
+ if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
fd.L1.Kind = protoreflect.GroupKind
}
- if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
+ if fd.L1.EditionFeatures.IsLegacyRequired {
fd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
@@ -496,13 +501,11 @@ func (fd *Field) unmarshalOptions(b []byte) {
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
- fd.L1.HasPacked = true
- fd.L1.IsPacked = protowire.DecodeBool(v)
+ fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
- fd.L1.HasEnforceUTF8 = true
- fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+ fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -548,7 +551,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
- xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -572,7 +574,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
- xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -580,12 +581,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
b = b[m:]
}
}
- if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
- xd.L1.Kind = protoreflect.GroupKind
- }
- if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
- xd.L1.Cardinality = protoreflect.Required
- }
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
@@ -598,32 +593,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
-func (xd *Extension) unmarshalOptions(b []byte) {
- for len(b) > 0 {
- num, typ, n := protowire.ConsumeTag(b)
- b = b[n:]
- switch typ {
- case protowire.VarintType:
- v, m := protowire.ConsumeVarint(b)
- b = b[m:]
- switch num {
- case genid.FieldOptions_Packed_field_number:
- xd.L2.IsPacked = protowire.DecodeBool(v)
- }
- case protowire.BytesType:
- v, m := protowire.ConsumeBytes(b)
- b = b[m:]
- switch num {
- case genid.FieldOptions_Features_field_number:
- xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
- }
- default:
- m := protowire.ConsumeFieldValue(num, typ, b)
- b = b[m:]
- }
- }
-}
-
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte
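Note: the resolveMessages hunk above demotes group-kind map fields back to message kind, since maps never use delimited encoding even when a file-wide editions feature selects it. A sketch of that normalization with hypothetical stand-in types:

package main

import "fmt"

// Kind is a hypothetical stand-in for protoreflect.Kind.
type Kind int

const (
	MessageKind Kind = iota
	GroupKind
)

// normalizeMapFieldKind mirrors the resolveMessages hunk above: even when a
// file-wide feature default selects delimited (group) encoding, map fields
// are forced back to ordinary length-prefixed message encoding.
func normalizeMapFieldKind(kind Kind, isMapOrMapEntry bool) Kind {
	if kind == GroupKind && isMapOrMapEntry {
		return MessageKind
	}
	return kind
}

func main() {
	fmt.Println(normalizeMapFieldKind(GroupKind, true))  // 0: demoted to MessageKind
	fmt.Println(normalizeMapFieldKind(GroupKind, false)) // 1: real groups stay groups
}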
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
index 30db19fdc..f4107c05f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -8,6 +8,7 @@ package filedesc
import (
"fmt"
+ "strings"
"sync"
"google.golang.org/protobuf/internal/descfmt"
@@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields {
if _, ok := p.byText[d.TextName()]; !ok {
p.byText[d.TextName()] = d
}
+ if isGroupLike(d) {
+ lowerJSONName := strings.ToLower(d.JSONName())
+ if _, ok := p.byJSON[lowerJSONName]; !ok {
+ p.byJSON[lowerJSONName] = d
+ }
+ lowerTextName := strings.ToLower(d.TextName())
+ if _, ok := p.byText[lowerTextName]; !ok {
+ p.byText[lowerTextName] = d
+ }
+ }
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
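Note: the Fields.lazyInit hunk above additionally indexes group-like fields under their lowercased JSON and text names, so lookups succeed for either spelling without overwriting an existing entry. A sketch of that double registration with a plain map:

package main

import (
	"fmt"
	"strings"
)

// registerNames mirrors the lazyInit change above: a group-like field is
// reachable under its canonical text name (the message name) and under the
// lowercased spelling, and earlier entries are never clobbered.
func registerNames(index map[string]int, textName string, groupLike bool, fieldIndex int) {
	if _, ok := index[textName]; !ok {
		index[textName] = fieldIndex
	}
	if groupLike {
		lower := strings.ToLower(textName)
		if _, ok := index[lower]; !ok {
			index[lower] = fieldIndex
		}
	}
}

func main() {
	idx := map[string]int{}
	registerNames(idx, "MyGroup", true, 3)
	fmt.Println(idx["MyGroup"], idx["mygroup"]) // 3 3
}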
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 0375a49d4..11f5f356b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -14,9 +14,13 @@ import (
)
var defaultsCache = make(map[Edition]EditionFeatures)
+var defaultsKeys = []Edition{}
func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
+ SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
+ SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
+ SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
}
func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
@@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
+ case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
+ fs = unmarshalFeatureSet(v, fs)
+ case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
}
}
}
defaultsCache[ed] = fs
+ defaultsKeys = append(defaultsKeys, ed)
}
func unmarshalEditionDefaults(b []byte) {
@@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) {
}
func getFeaturesFor(ed Edition) EditionFeatures {
- if def, ok := defaultsCache[ed]; ok {
- return def
+ match := EditionUnknown
+ for _, key := range defaultsKeys {
+ if key > ed {
+ break
+ }
+ match = key
+ }
+ if match == EditionUnknown {
+ panic(fmt.Sprintf("unsupported edition: %v", ed))
}
- panic(fmt.Sprintf("unsupported edition: %v", ed))
+ return defaultsCache[match]
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
index 28240ebc5..bfb3b8417 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des
func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) IsClosed() bool { return false }
func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index f0e38c4ef..ba83fea44 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -68,7 +68,7 @@ type Builder struct {
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
// dependencies themselves is unspecified.
- GoTypes []interface{}
+ GoTypes []any
// DependencyIndexes is an ordered list of indexes into GoTypes for the
// dependencies of messages, extensions, or services.
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 {
type (
resolverByIndex struct {
- goTypes []interface{}
+ goTypes []any
depIdxs depIdxs
fileRegistry
}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 40272c893..f30ab6b58 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -21,6 +21,7 @@ const (
// Enum values for google.protobuf.Edition.
const (
Edition_EDITION_UNKNOWN_enum_value = 0
+ Edition_EDITION_LEGACY_enum_value = 900
Edition_EDITION_PROTO2_enum_value = 998
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
@@ -653,6 +654,7 @@ const (
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
FieldOptions_Features_field_name protoreflect.Name = "features"
+ FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -667,6 +669,7 @@ const (
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
+ FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -684,6 +687,7 @@ const (
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
FieldOptions_Features_field_number protoreflect.FieldNumber = 21
+ FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -767,6 +771,33 @@ const (
FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
)
+// Names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport"
+ FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
+)
+
+// Field names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced"
+ FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated"
+ FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
+ FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed"
+
+ FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
+ FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
+ FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
+ FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
+)
+
+// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1
+ FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2
+ FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
+ FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4
+)
+
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -829,11 +860,13 @@ const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumValueOptions_Features_field_name protoreflect.Name = "features"
EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
+ EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
+ EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
@@ -842,6 +875,7 @@ const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
+ EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -1110,17 +1144,20 @@ const (
// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features"
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
)
// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5
)
// Names for google.protobuf.SourceCodeInfo.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index fd9015e8e..9a652a2b4 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -10,7 +10,7 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
-const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
+const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
// Names for google.protobuf.GoFeatures.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index a371f98de..5d5771c2e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -22,13 +22,13 @@ type Export struct{}
// NewError formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
-func (Export) NewError(f string, x ...interface{}) error {
+func (Export) NewError(f string, x ...any) error {
return errors.New(f, x...)
}
// enum is any enum type generated by protoc-gen-go
// and must be a named int32 type.
-type enum = interface{}
+type enum = any
// EnumOf returns the protoreflect.Enum interface over e.
// It returns nil if e is nil.
@@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu
// message is any message type generated by protoc-gen-go
// and must be a pointer to a named struct type.
-type message = interface{}
+type message = any
// legacyMessageWrapper wraps a v2 message as a v1 message.
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index bff041edc..f29e6a8fa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error {
}
for _, x := range *ext {
ei := getExtensionFieldInfo(x.Type())
- if ei.funcs.isInit == nil {
+ if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
continue
}
v := x.Value()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 2b8f122c2..4bb0a7a20 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool {
return false
}
+// isUnexpandedLazy returns true if the ExtensionField is lazy and not
+// yet expanded, which means it's present and already checked for
+// initialized required fields.
+func (f *ExtensionField) isUnexpandedLazy() bool {
+ return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
+
+// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
+//
+// The returned buffer has to be kept over whatever operation we're planning,
+// as re-retrieving it will fail after the message is lazily decoded.
+func (f *ExtensionField) lazyBuffer() []byte {
+ // This function might be in the critical path, so check the atomic without
+ // taking a look first, then only take the lock if needed.
+ if !f.isUnexpandedLazy() {
+ return nil
+ }
+ f.lazy.mu.Lock()
+ defer f.lazy.mu.Unlock()
+ return f.lazy.b
+}
+
func (f *ExtensionField) lazyInit() {
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
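Note: isUnexpandedLazy and lazyBuffer above let callers reuse an extension's retained wire bytes while it is still lazy, checking an atomic flag before taking the lock. A self-contained sketch of that fast path with hypothetical field names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyField sketches the shape lazyBuffer relies on: an extension keeps its
// original wire bytes until first access, guarded by an atomic "expanded"
// flag plus a mutex (names are stand-ins for the internal type).
type lazyField struct {
	expanded uint32 // 0 = still lazy, 1 = already unmarshaled
	mu       sync.Mutex
	b        []byte // retained wire-format bytes
}

// buffer returns the retained bytes only while the field is still lazy,
// checking the atomic first so the common expanded case takes no lock.
func (f *lazyField) buffer() []byte {
	if atomic.LoadUint32(&f.expanded) != 0 {
		return nil
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.b
}

func main() {
	f := &lazyField{b: []byte{0x0a, 0x03, 'a', 'b', 'c'}}
	fmt.Println(len(f.buffer())) // 5: still lazy, bytes can be reused verbatim

	atomic.StoreUint32(&f.expanded, 1)
	fmt.Println(f.buffer() == nil) // true: expanded, must re-marshal instead
}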
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 3fadd241e..78ee47e44 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int {
}
func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ calculatedSize := f.mi.sizePointer(p.Elem(), opts)
b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
- return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
}
func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error {
return f.mi.checkInitializedPointer(p.Elem())
}
-func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
- return protowire.SizeBytes(proto.Size(m)) + tagsize
+func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int {
+ return protowire.SizeBytes(opts.Options().Size(m)) + tagsize
}
func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ mopts := opts.Options()
+ calculatedSize := mopts.Size(m)
b = protowire.AppendVarint(b, wiretag)
- b = protowire.AppendVarint(b, uint64(proto.Size(m)))
- return opts.Options().MarshalAppend(b, m)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := mopts.MarshalAppend(b, m)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
}
func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf
return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
}
-func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
- return 2*tagsize + proto.Size(m)
+func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int {
+ return 2*tagsize + opts.Options().Size(m)
}
func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
@@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal
b = protowire.AppendVarint(b, f.wiretag)
siz := f.mi.sizePointer(v, opts)
b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error {
return nil
}
-func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
- n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+ mopts := opts.Options()
s := p.PointerSlice()
var err error
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
b = protowire.AppendVarint(b, wiretag)
- siz := proto.Size(m)
+ siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
- b, err = opts.Options().MarshalAppend(b, m)
+ before := len(b)
+ b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error {
// Slices of messages
func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
- n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
@@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64,
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
b = protowire.AppendVarint(b, wiretag)
- siz := proto.Size(m)
+ siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
var err error
b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{
}
func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
- n += 2*tagsize + proto.Size(m)
+ n += 2*tagsize + mopts.Size(m)
}
return n
}
@@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type)
}
}
-func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(messageType.Elem()))
- n += 2*tagsize + proto.Size(m)
+ n += 2*tagsize + mopts.Size(m)
}
return n
}
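Note: the codec_field.go hunks above wrap every nested message and group marshal in a calculated-vs-measured size check, returning errors.MismatchedSizeCalculation when the two disagree rather than emitting a length prefix that no longer matches the payload. A sketch of the guard in isolation (illustrative names, not the internal API):

package main

import "fmt"

// appendWithSizeCheck sketches the guard threaded through codec_field.go:
// remember how many bytes a nested marshal was predicted to need, perform
// it, and report a mismatch instead of leaving an inconsistent length
// prefix in the output.
func appendWithSizeCheck(b []byte, calculated int, marshal func([]byte) ([]byte, error)) ([]byte, error) {
	before := len(b)
	b, err := marshal(b)
	if measured := len(b) - before; err == nil && measured != calculated {
		return nil, fmt.Errorf("size mismatch: calculated=%d, measured=%d", calculated, measured)
	}
	return b, err
}

func main() {
	payload := []byte("abc")

	b, err := appendWithSizeCheck(nil, 3, func(b []byte) ([]byte, error) {
		return append(b, payload...), nil
	})
	fmt.Println(b, err) // [97 98 99] <nil>

	_, err = appendWithSizeCheck(nil, 2, func(b []byte) ([]byte, error) {
		return append(b, payload...), nil
	})
	fmt.Println(err) // size mismatch: calculated=2, measured=3
}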
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 111b9d16f..fb35f0bae 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -9,6 +9,7 @@ import (
"sort"
"google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
)
@@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
size += mapi.valFuncs.size(val, mapValTagSize, opts)
b = protowire.AppendVarint(b, uint64(size))
+ before := len(b)
b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
if err != nil {
return nil, err
}
- return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ if measuredSize := len(b) - before; size != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(size, measuredSize)
+ }
+ return b, err
} else {
key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
val := pointerOfValue(valrv)
@@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder
}
b = protowire.AppendVarint(b, mapi.valWiretag)
b = protowire.AppendVarint(b, uint64(valSize))
- return f.mi.marshalAppendPointer(b, val, opts)
+ before := len(b)
+ b, err = f.mi.marshalAppendPointer(b, val, opts)
+ if measuredSize := len(b) - before; valSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(valSize, measuredSize)
+ }
+ return b, err
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index b7a23faf1..7a16ec13d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int)
}
num, _ := protowire.DecodeTag(xi.wiretag)
size += messageset.SizeField(num)
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension, instead use the buffer to calculate size
+ if lb := x.lazyBuffer(); lb != nil {
+ // We got hold of the buffer, so it's still lazy.
+ // Don't count the tag size in the extension buffer, it's already added.
+ size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
+ continue
+ }
+ }
size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
}
@@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma
xi := getExtensionFieldInfo(x.Type())
num, _ := protowire.DecodeTag(xi.wiretag)
b = messageset.AppendFieldStart(b, num)
+
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format, instead use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ // The tag inside the lazy buffer is a different tag (the extension
+ // number), but what we need here is the tag for FieldMessage:
+ b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
+ b = append(b, lb[xi.tagsize:]...)
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+ }
+ }
+
b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
if err != nil {
return b, err
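Note: marshalMessageSetField above reuses a still-lazy extension's retained buffer but must swap the leading tag, since a message-set item carries its payload under field 3 rather than under the extension number. A runnable sketch of that retagging using the public protowire package (the extension number here is made up for illustration):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// A lazily retained extension buffer: the extension's own tag (field
	// 1234, bytes type) followed by a 3-byte length-prefixed payload.
	var lb []byte
	lb = protowire.AppendTag(lb, 1234, protowire.BytesType)
	tagSize := len(lb) // bytes occupied by the extension's own tag
	lb = protowire.AppendBytes(lb, []byte("abc"))

	// Re-emit the same payload under the message-set item's "message" field
	// (field number 3): swap the tag, keep the already-encoded length and
	// payload verbatim.
	const fieldMessage = 3
	var out []byte
	out = protowire.AppendVarint(out, protowire.EncodeTag(fieldMessage, protowire.BytesType))
	out = append(out, lb[tagSize:]...)

	num, typ, n := protowire.ConsumeTag(out)
	fmt.Println(num, typ, len(out)-n) // 3 2 4: payload carried over unchanged
}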
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2efa..e06ece55a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@ import (
// unwrapper unwraps the value to the underlying value.
// This is implemented by List and Map.
type unwrapper interface {
- protoUnwrap() interface{}
+ protoUnwrap() any
}
// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index f89136516..18cb96fd7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value {
func (ls *listReflect) IsValid() bool {
return !ls.v.IsNil()
}
-func (ls *listReflect) protoUnwrap() interface{} {
+func (ls *listReflect) protoUnwrap() any {
return ls.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index f30b0a057..304244a65 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value {
func (ms *mapReflect) IsValid() bool {
return !ms.v.IsNil()
}
-func (ms *mapReflect) protoUnwrap() interface{} {
+func (ms *mapReflect) protoUnwrap() any {
return ms.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 845c67d6e..febd21224 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) {
return 0
}
if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
- if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
- return int(size)
+ // The size cache contains the size + 1, to allow the
+ // zero value to be invalid, while also allowing for a
+ // 0 size to be cached.
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
+ return int(size - 1)
}
}
return mi.sizePointerSlow(p, opts)
@@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
if flags.ProtoLegacy && mi.isMessageSet {
size = sizeMessageSet(mi, p, opts)
if mi.sizecacheOffset.IsValid() {
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
return size
}
@@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int
}
}
if mi.sizecacheOffset.IsValid() {
- if size > math.MaxInt32 {
+ if size > (math.MaxInt32 - 1) {
// The size is too large for the int32 sizecache field.
// We will need to recompute the size when encoding;
// unfortunately expensive, but better than invalid output.
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
} else {
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ // The size cache contains the size + 1, to allow the
+ // zero value to be invalid, while also allowing for a
+ // 0 size to be cached.
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
}
return size
@@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt
return b, nil
}
+// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal.
+func fullyLazyExtensions(opts marshalOptions) bool {
+ // When deterministic marshaling is requested, force an unmarshal for lazy
+ // extensions to produce a deterministic result, instead of passing through
+ // bytes lazily that may or may not match what Go Protobuf would produce.
+ return opts.flags&piface.MarshalDeterministic == 0
+}
+
func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
if ext == nil {
return 0
@@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha
if xi.funcs.size == nil {
continue
}
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension, instead use the buffer to calculate size
+ if lb := x.lazyBuffer(); lb != nil {
+ // We got hold of the buffer, so it's still lazy.
+ n += len(lb)
+ continue
+ }
+ }
n += xi.funcs.size(x.Value(), xi.tagsize, opts)
}
return n
@@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
var err error
for _, x := range *ext {
xi := getExtensionFieldInfo(x.Type())
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format, instead use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ b = append(b, lb...)
+ continue
+ }
+ }
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
}
return b, err
@@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField,
for _, k := range keys {
x := (*ext)[int32(k)]
xi := getExtensionFieldInfo(x.Type())
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format, instead use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ b = append(b, lb...)
+ continue
+ }
+ }
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
if err != nil {
return b, err
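Note: encode.go above changes the size cache to store size+1, so the zero value means "nothing cached" while a legitimate zero-length message is still cacheable; fullyLazyExtensions then lets lazy extension buffers flow through sizing and marshaling untouched unless deterministic output is requested. A sketch of the off-by-one cache scheme:

package main

import (
	"fmt"
	"sync/atomic"
)

// sizeCache sketches the scheme adopted in encode.go: the slot stores
// size+1, so 0 means "nothing cached" while a genuine size of 0 is still
// representable.
type sizeCache struct{ v int32 }

func (c *sizeCache) store(size int) {
	atomic.StoreInt32(&c.v, int32(size)+1)
}

func (c *sizeCache) load() (size int, ok bool) {
	if v := atomic.LoadInt32(&c.v); v > 0 {
		return int(v - 1), true
	}
	return 0, false
}

func main() {
	var c sizeCache
	fmt.Println(c.load()) // 0 false: an empty cache is distinguishable...
	c.store(0)
	fmt.Println(c.load()) // 0 true: ...from a cached zero-length message
}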
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
index cb25b0bae..e31249f64 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -53,7 +53,7 @@ type ExtensionInfo struct {
// type returned by InterfaceOf may not be identical.
//
// Deprecated: Use InterfaceOf(xt.Zero()) instead.
- ExtensionType interface{}
+ ExtensionType any
// Field is the field number of the extension.
//
@@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value {
func (xi *ExtensionInfo) Zero() protoreflect.Value {
return xi.lazyInit().Zero()
}
-func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
+func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
}
-func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
+func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
return xi.lazyInit().GoValueOf(v).Interface()
}
func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
return xi.lazyInit().IsValidPB(v)
}
-func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+func (xi *ExtensionInfo) IsValidInterface(v any) bool {
return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
}
func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
index c2a803bb2..81b2b1a76 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber {
func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
return e
}
-func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+func (e *legacyEnumWrapper) protoUnwrap() any {
v := reflect.New(e.goTyp).Elem()
v.SetInt(int64(e.num))
return v.Interface()
@@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor {
ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
ed.L0.ParentFile = filedesc.SurrogateProto3
+ ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures
ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
// TODO: Use the presence of a UnmarshalJSON method to determine proto2?
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 87b30d050..6e8677ee6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() {
xd.L1.Number = protoreflect.FieldNumber(xi.Field)
xd.L1.Cardinality = fd.L1.Cardinality
xd.L1.Kind = fd.L1.Kind
- xd.L2.IsPacked = fd.L1.IsPacked
+ xd.L1.EditionFeatures = fd.L1.EditionFeatures
xd.L2.Default = fd.L1.Default
xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
xd.L2.Enum = ed
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
index 9ab091086..b649f1124 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -7,7 +7,7 @@ package impl
import (
"bytes"
"compress/gzip"
- "io/ioutil"
+ "io"
"sync"
"google.golang.org/protobuf/internal/filedesc"
@@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor {
if err != nil {
panic(err)
}
- b2, err := ioutil.ReadAll(zr)
+ b2, err := io.ReadAll(zr)
if err != nil {
panic(err)
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 2ab2c6297..bf0b6049b 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
}
}
+ md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
methods := make([]reflect.Method, 0, 2)
@@ -215,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
+ if vs, ok := v.Interface().([]any); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
@@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
od := &md.L2.Oneofs.List[n]
od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
+ od.L1.EditionFeatures = md.L1.EditionFeatures
od.L0.Parent = md
od.L0.Index = n
@@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
aberrantAppendField(md, f.Type, tag, "", "")
fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
fd.L1.ContainingOneof = od
+ fd.L1.EditionFeatures = od.L1.EditionFeatures
od.L1.Fields.List = append(od.L1.Fields.List, fd)
}
}
@@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Parent = md
fd.L0.Index = n
- if fd.L1.IsWeak || fd.L1.HasPacked {
+ if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
if fd.L1.IsWeak {
opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
}
- if fd.L1.HasPacked {
- opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+ if fd.L1.EditionFeatures.IsPacked {
+ opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
return opts.Interface()
}
@@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
md2.L0.ParentFile = md.L0.ParentFile
md2.L0.Parent = md
md2.L0.Index = n
+ md2.L1.EditionFeatures = md.L1.EditionFeatures
md2.L1.IsMapEntry = true
md2.L2.Options = func() protoreflect.ProtoMessage {
@@ -563,6 +567,6 @@ func (m aberrantMessage) IsValid() bool {
func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
return aberrantProtoMethods
}
-func (m aberrantMessage) protoUnwrap() interface{} {
+func (m aberrantMessage) protoUnwrap() any {
return m.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 629bacdce..019399d45 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -35,7 +35,7 @@ type MessageInfo struct {
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
- OneofWrappers []interface{}
+ OneofWrappers []any
initMu sync.Mutex // protects all unexported fields
initDone uint32
@@ -47,7 +47,7 @@ type MessageInfo struct {
// exporter is a function that returns a reference to the ith field of v,
// where v is a pointer to a struct. It returns nil if it does not support
// exporting the requested field (e.g., already exported).
-type exporter func(v interface{}, i int) interface{}
+type exporter func(v any, i int) any
// getMessageInfo returns the MessageInfo for any message type that
// is generated by our implementation of protoc-gen-go (for v2 and on).
@@ -201,7 +201,7 @@ fieldLoop:
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
+ if vs, ok := v.Interface().([]any); ok {
oneofWrappers = vs
}
}
@@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
type mapEntryType struct {
desc protoreflect.MessageDescriptor
- valType interface{} // zero value of enum or message type
+ valType any // zero value of enum or message type
}
func (mt mapEntryType) New() protoreflect.Message {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index d9ea010be..ecb4623d7 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -20,7 +20,7 @@ type reflectMessageInfo struct {
// fieldTypes contains the zero value of an enum or message field.
// For lists, it contains the element type.
// For maps, it contains the entry value type.
- fieldTypes map[protoreflect.FieldNumber]interface{}
+ fieldTypes map[protoreflect.FieldNumber]any
// denseFields is a subset of fields where:
// 0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@ type reflectMessageInfo struct {
denseFields []*fieldInfo
// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
- rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+ rangeInfos []any // either *fieldInfo or *oneofInfo
getUnknown func(pointer) protoreflect.RawFields
setUnknown func(pointer, protoreflect.RawFields)
@@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
if ft != nil {
if mi.fieldTypes == nil {
- mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
+ mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
}
mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
}
@@ -247,39 +247,39 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
}
}
}
-func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
+func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
if m == nil {
return false
}
- xd := xt.TypeDescriptor()
x, ok := (*m)[int32(xd.Number())]
if !ok {
return false
}
+ if x.isUnexpandedLazy() {
+ // Avoid calling x.Value(), which triggers a lazy unmarshal.
+ return true
+ }
switch {
case xd.IsList():
return x.Value().List().Len() > 0
case xd.IsMap():
return x.Value().Map().Len() > 0
- case xd.Message() != nil:
- return x.Value().Message().IsValid()
}
return true
}
-func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
- delete(*m, int32(xt.TypeDescriptor().Number()))
+func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) {
+ delete(*m, int32(xd.Number()))
}
-func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
if m != nil {
if x, ok := (*m)[int32(xd.Number())]; ok {
return x.Value()
}
}
- return xt.Zero()
+ return xd.Type().Zero()
}
-func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) {
+ xt := xd.Type()
isValid := true
switch {
case !xt.IsValidValue(v):
@@ -292,7 +292,7 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
isValid = v.Message().IsValid()
}
if !isValid {
- panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+ panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName()))
}
if *m == nil {
@@ -302,16 +302,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value)
x.Set(xt, v)
(*m)[int32(xd.Number())] = x
}
-func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
panic("invalid Mutable on field with non-composite type")
}
if x, ok := (*m)[int32(xd.Number())]; ok {
return x.Value()
}
- v := xt.New()
- m.Set(xt, v)
+ v := xd.Type().New()
+ m.Set(xd, v)
return v
}
@@ -394,7 +393,7 @@ var (
// MessageOf returns a reflective view over a message. The input must be a
// pointer to a named Go struct. If the provided type has a ProtoReflect method,
// it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
+func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
if reflect.TypeOf(m) != mi.GoReflectType {
panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
}
@@ -422,13 +421,13 @@ func (m *messageIfaceWrapper) Reset() {
func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
return (*messageReflectWrapper)(m)
}
-func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+func (m *messageIfaceWrapper) protoUnwrap() any {
return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
}
// checkField verifies that the provided field descriptor is valid.
// Exactly one of the returned values is populated.
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) {
+func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) {
var fi *fieldInfo
if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
fi = mi.denseFields[n]
@@ -457,7 +456,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo,
if !ok {
panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
}
- return nil, xtd.Type()
+ return nil, xtd
}
panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
index 741d6e5b6..99dc23c6f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -23,12 +23,13 @@ func (m *messageState) New() protoreflect.Message {
func (m *messageState) Interface() protoreflect.ProtoMessage {
return m.protoUnwrap().(protoreflect.ProtoMessage)
}
-func (m *messageState) protoUnwrap() interface{} {
+func (m *messageState) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageState) ProtoMethods() *protoiface.Methods {
- m.messageInfo().init()
- return &m.messageInfo().methods
+ mi := m.messageInfo()
+ mi.init()
+ return &mi.methods
}
// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo {
}
func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- m.messageInfo().init()
- for _, ri := range m.messageInfo().rangeInfos {
+ mi := m.messageInfo()
+ mi.init()
+ for _, ri := range mi.rangeInfos {
switch ri := ri.(type) {
case *fieldInfo:
if ri.has(m.pointer()) {
@@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V
}
case *oneofInfo:
if n := ri.which(m.pointer()); n > 0 {
- fi := m.messageInfo().fields[n]
+ fi := mi.fields[n]
if !f(fi.fieldDesc, fi.get(m.pointer())) {
return
}
}
}
}
- m.messageInfo().extensionMap(m.pointer()).Range(f)
+ mi.extensionMap(m.pointer()).Range(f)
}
func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.has(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ return mi.extensionMap(m.pointer()).Has(xd)
}
}
func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.clear(m.pointer())
} else {
- m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ mi.extensionMap(m.pointer()).Clear(xd)
}
}
func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.get(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ return mi.extensionMap(m.pointer()).Get(xd)
}
}
func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.set(m.pointer(), v)
} else {
- m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ mi.extensionMap(m.pointer()).Set(xd, v)
}
}
func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.mutable(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ return mi.extensionMap(m.pointer()).Mutable(xd)
}
}
func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.newField()
} else {
- return xt.New()
+ return xd.Type().New()
}
}
func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
- m.messageInfo().init()
- if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ mi := m.messageInfo()
+ mi.init()
+ if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageState) GetUnknown() protoreflect.RawFields {
- m.messageInfo().init()
- return m.messageInfo().getUnknown(m.pointer())
+ mi := m.messageInfo()
+ mi.init()
+ return mi.getUnknown(m.pointer())
}
func (m *messageState) SetUnknown(b protoreflect.RawFields) {
- m.messageInfo().init()
- m.messageInfo().setUnknown(m.pointer(), b)
+ mi := m.messageInfo()
+ mi.init()
+ mi.setUnknown(m.pointer(), b)
}
func (m *messageState) IsValid() bool {
return !m.pointer().IsNil()
@@ -143,12 +154,13 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage {
}
return (*messageIfaceWrapper)(m)
}
-func (m *messageReflectWrapper) protoUnwrap() interface{} {
+func (m *messageReflectWrapper) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
- m.messageInfo().init()
- return &m.messageInfo().methods
+ mi := m.messageInfo()
+ mi.init()
+ return &mi.methods
}
// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo {
}
func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- m.messageInfo().init()
- for _, ri := range m.messageInfo().rangeInfos {
+ mi := m.messageInfo()
+ mi.init()
+ for _, ri := range mi.rangeInfos {
switch ri := ri.(type) {
case *fieldInfo:
if ri.has(m.pointer()) {
@@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto
}
case *oneofInfo:
if n := ri.which(m.pointer()); n > 0 {
- fi := m.messageInfo().fields[n]
+ fi := mi.fields[n]
if !f(fi.fieldDesc, fi.get(m.pointer())) {
return
}
}
}
}
- m.messageInfo().extensionMap(m.pointer()).Range(f)
+ mi.extensionMap(m.pointer()).Range(f)
}
func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.has(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ return mi.extensionMap(m.pointer()).Has(xd)
}
}
func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.clear(m.pointer())
} else {
- m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ mi.extensionMap(m.pointer()).Clear(xd)
}
}
func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.get(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ return mi.extensionMap(m.pointer()).Get(xd)
}
}
func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.set(m.pointer(), v)
} else {
- m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ mi.extensionMap(m.pointer()).Set(xd, v)
}
}
func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.mutable(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ return mi.extensionMap(m.pointer()).Mutable(xd)
}
}
func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.newField()
} else {
- return xt.New()
+ return xd.Type().New()
}
}
func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
- m.messageInfo().init()
- if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ mi := m.messageInfo()
+ mi.init()
+ if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
- m.messageInfo().init()
- return m.messageInfo().getUnknown(m.pointer())
+ mi := m.messageInfo()
+ mi.init()
+ return mi.getUnknown(m.pointer())
}
func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
- m.messageInfo().init()
- m.messageInfo().setUnknown(m.pointer(), b)
+ mi := m.messageInfo()
+ mi.init()
+ mi.setUnknown(m.pointer(), b)
}
func (m *messageReflectWrapper) IsValid() bool {
return !m.pointer().IsNil()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 517e94434..da685e8a2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -16,7 +16,7 @@ import (
const UnsafeEnabled = false
// Pointer is an opaque pointer type.
-type Pointer interface{}
+type Pointer any
// offset represents the offset to a struct field, accessible from a pointer.
// The offset is the field index into a struct.
@@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer {
}
// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
+func pointerOfIface(v any) pointer {
return pointer{v: reflect.ValueOf(v)}
}
@@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+func (p pointer) AsIfaceOf(t reflect.Type) any {
return p.AsValueOf(t).Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 4b020e311..5f20ca5d8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer {
}
// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
+func pointerOfIface(v any) pointer {
type ifaceHeader struct {
Type unsafe.Pointer
Data unsafe.Pointer
@@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+func (p pointer) AsIfaceOf(t reflect.Type) any {
// TODO: Use tricky unsafe magic to directly create ifaceHeader.
return p.AsValueOf(t).Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
index 1665a68e5..a1f09162d 100644
--- a/vendor/google.golang.org/protobuf/internal/order/range.go
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -18,7 +18,7 @@ type messageField struct {
}
var messageFieldPool = sync.Pool{
- New: func() interface{} { return new([]messageField) },
+ New: func() any { return new([]messageField) },
}
type (
@@ -69,7 +69,7 @@ type mapEntry struct {
}
var mapEntryPool = sync.Pool{
- New: func() interface{} { return new([]mapEntry) },
+ New: func() any { return new([]mapEntry) },
}
type (
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index a50fcfb49..dbbf1f686 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 33
- Patch = 0
+ Minor = 34
+ Patch = 2
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index e5b03b567..d75a6534c 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -51,6 +51,8 @@ type UnmarshalOptions struct {
// Unmarshal parses the wire-format message in b and places the result in m.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the [UnmarshalOptions] type if you need more control.
func Unmarshal(b []byte, m Message) error {
_, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect())
return err
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 4fed202f9..1f847bcc3 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -5,12 +5,17 @@
package proto
import (
+ "errors"
+ "fmt"
+
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
+
+ protoerrors "google.golang.org/protobuf/internal/errors"
)
// MarshalOptions configures the marshaler.
@@ -70,7 +75,32 @@ type MarshalOptions struct {
UseCachedSize bool
}
+// flags turns the specified MarshalOptions (user-facing) into
+// protoiface.MarshalInputFlags (used internally by the marshaler).
+//
+// See impl.marshalOptions.Options for the inverse operation.
+func (o MarshalOptions) flags() protoiface.MarshalInputFlags {
+ var flags protoiface.MarshalInputFlags
+
+ // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal,
+ // which is why it is not a part of MarshalInputFlags.
+
+ if o.Deterministic {
+ flags |= protoiface.MarshalDeterministic
+ }
+
+ if o.UseCachedSize {
+ flags |= protoiface.MarshalUseCachedSize
+ }
+
+ return flags
+}
+
// Marshal returns the wire-format encoding of m.
+//
+// This is the most common entry point for encoding a Protobuf message.
+//
+// See the [MarshalOptions] type if you need more control.
func Marshal(m Message) ([]byte, error) {
// Treat nil message interface as an empty message; nothing to output.
if m == nil {
@@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte {
// MarshalAppend appends the wire-format encoding of m to b,
// returning the result.
+//
+// This is a less common entry point than [Marshal], which is only needed if you
+// need to supply your own buffers for performance reasons.
func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
// Treat nil message interface as an empty message; nothing to append.
if m == nil {
@@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac
in := protoiface.MarshalInput{
Message: m,
Buf: b,
- }
- if o.Deterministic {
- in.Flags |= protoiface.MarshalDeterministic
- }
- if o.UseCachedSize {
- in.Flags |= protoiface.MarshalUseCachedSize
+ Flags: o.flags(),
}
if methods.Size != nil {
sout := methods.Size(protoiface.SizeInput{
@@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac
out.Buf, err = o.marshalMessageSlow(b, m)
}
if err != nil {
+ var mismatch *protoerrors.SizeMismatchError
+ if errors.As(err, &mismatch) {
+ return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err)
+ }
return out, err
}
if allowPartial {
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 17899a3a7..d248f2928 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -11,18 +11,21 @@ import (
// HasExtension reports whether an extension field is populated.
// It returns false if m is invalid or if xt does not extend m.
func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
- // Treat nil message interface as an empty message; no populated fields.
- if m == nil {
+ // Treat nil message interface or descriptor as an empty message; no populated
+ // fields.
+ if m == nil || xt == nil {
return false
}
// As a special-case, we reports invalid or mismatching descriptors
// as always not being populated (since they aren't).
- if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ mr := m.ProtoReflect()
+ xd := xt.TypeDescriptor()
+ if mr.Descriptor() != xd.ContainingMessage() {
return false
}
- return m.ProtoReflect().Has(xt.TypeDescriptor())
+ return mr.Has(xd)
}
// ClearExtension clears an extension field such that subsequent
@@ -36,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
-func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
return xt.InterfaceOf(xt.Zero())
@@ -48,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
-func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
@@ -75,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
// It returns immediately if f returns false.
// While iterating, mutating operations may only be performed
// on the current extension field.
-func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) {
// Treat nil message interface as an empty message; nothing to range over.
if m == nil {
return
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
index 312d5d45c..575d14831 100644
--- a/vendor/google.golang.org/protobuf/proto/messageset.go
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b
func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
b = messageset.AppendFieldStart(b, fd.Number())
b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
- b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
+ calculatedSize := o.Size(value.Message().Interface())
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
b, err := o.marshalMessage(b, value.Message())
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; calculatedSize != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
b = messageset.AppendFieldEnd(b)
return b, nil
}
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index f1692b49b..052fb5ae3 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) {
if methods != nil && methods.Size != nil {
out := methods.Size(protoiface.SizeInput{
Message: m,
+ Flags: o.flags(),
})
return out.Size
}
@@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) {
// This case is mainly used for legacy types with a Marshal method.
out, _ := methods.Marshal(protoiface.MarshalInput{
Message: m,
+ Flags: o.flags(),
})
return len(out.Buf)
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index baa0cc621..8fbecb4f5 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,6 +13,7 @@
package protodesc
import (
+ "google.golang.org/protobuf/internal/editionssupport"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/pragma"
@@ -91,15 +92,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
switch fd.GetSyntax() {
case "proto2", "":
f.L1.Syntax = protoreflect.Proto2
+ f.L1.Edition = filedesc.EditionProto2
case "proto3":
f.L1.Syntax = protoreflect.Proto3
+ f.L1.Edition = filedesc.EditionProto3
case "editions":
f.L1.Syntax = protoreflect.Editions
f.L1.Edition = fromEditionProto(fd.GetEdition())
default:
return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
}
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
+ if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
}
f.L1.Path = fd.GetName()
@@ -114,9 +117,7 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
opts = proto.Clone(opts).(*descriptorpb.FileOptions)
f.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
- if f.L1.Syntax == protoreflect.Editions {
- initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
- }
+ initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
for _, i := range fd.GetPublicDependency() {
@@ -219,10 +220,10 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
return nil, err
}
- if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil {
return nil, err
}
- if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil {
return nil, err
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index b3278163c..856175542 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -69,9 +69,7 @@ func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProt
if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
return nil, err
}
- if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
- m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
- }
+ m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
if opts := md.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
m.L2.Options = func() protoreflect.ProtoMessage { return opts }
@@ -146,13 +144,15 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
return nil, err
}
+ f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
f.L1.IsProto3Optional = fd.GetProto3Optional()
if opts := fd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
f.L1.IsWeak = opts.GetWeak()
- f.L1.HasPacked = opts.Packed != nil
- f.L1.IsPacked = opts.GetPacked()
+ if opts.Packed != nil {
+ f.L1.EditionFeatures.IsPacked = opts.GetPacked()
+ }
}
f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
@@ -163,32 +163,12 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
f.L1.StringName.InitJSON(fd.GetJsonName())
}
- if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
- f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
-
- if f.L1.EditionFeatures.IsLegacyRequired {
- f.L1.Cardinality = protoreflect.Required
- }
- // We reuse the existing field because the old option `[packed =
- // true]` is mutually exclusive with the editions feature.
- if canBePacked(fd) {
- f.L1.HasPacked = true
- f.L1.IsPacked = f.L1.EditionFeatures.IsPacked
- }
-
- // We pretend this option is always explicitly set because the only
- // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
- // or to return the appropriate default.
- // When using editions we either parse the option or resolve the
- // appropriate default here (instead of later when this option is
- // requested from the descriptor).
- // In proto2/proto3 syntax HasEnforceUTF8 might be false.
- f.L1.HasEnforceUTF8 = true
- f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated
+ if f.L1.EditionFeatures.IsLegacyRequired {
+ f.L1.Cardinality = protoreflect.Required
+ }
- if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
- f.L1.Kind = protoreflect.GroupKind
- }
+ if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
+ f.L1.Kind = protoreflect.GroupKind
}
}
return fs, nil
@@ -201,12 +181,10 @@ func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDesc
if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
return nil, err
}
+ o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures())
if opts := od.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
o.L1.Options = func() protoreflect.ProtoMessage { return opts }
- if parent.Syntax() == protoreflect.Editions {
- o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures())
- }
}
}
return os, nil
@@ -220,10 +198,13 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
return nil, err
}
+ x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures())
if opts := xd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
x.L2.Options = func() protoreflect.ProtoMessage { return opts }
- x.L2.IsPacked = opts.GetPacked()
+ if opts.Packed != nil {
+ x.L1.EditionFeatures.IsPacked = opts.GetPacked()
+ }
}
x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index 254ca5854..f3cebab29 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
}
+ if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
+ // A map field might inherit delimited encoding from a file-wide default feature.
+ // But maps never actually use delimited encoding. (At least for now...)
+ f.L1.Kind = protoreflect.MessageKind
+ }
if fd.DefaultValue != nil {
v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
if err != nil {
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index e4dcaf876..6de31c2eb 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -45,11 +45,11 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
if allowAlias && !foundAlias {
return errors.New("enum %q allows aliases, but none were found", e.FullName())
}
- if e.Syntax() == protoreflect.Proto3 {
+ if !e.IsClosed() {
if v := e.Values().Get(0); v.Number() != 0 {
- return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName())
+ return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName())
}
- // Verify that value names in proto3 do not conflict if the
+ // Verify that value names in open enums do not conflict if the
// case-insensitive prefix is removed.
// See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
names := map[string]protoreflect.EnumValueDescriptor{}
@@ -58,7 +58,7 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
v1 := e.Values().Get(i)
s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
- return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
+ return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
}
names[s] = v1
}
@@ -80,7 +80,9 @@ func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescri
return nil
}
-func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+ // There are a few limited exceptions only for proto3
+ isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3)
for i, md := range mds {
m := &ms[i]
@@ -107,25 +109,13 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
if isMessageSet && !flags.ProtoLegacy {
return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
}
- if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
+ if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
}
- if m.Syntax() == protoreflect.Proto3 {
+ if isProto3 {
if m.ExtensionRanges().Len() > 0 {
return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
}
- // Verify that field names in proto3 do not conflict if lowercased
- // with all underscores removed.
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
- names := map[string]protoreflect.FieldDescriptor{}
- for i := 0; i < m.Fields().Len(); i++ {
- f1 := m.Fields().Get(i)
- s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
- if f2, ok := names[s]; ok {
- return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
- }
- names[s] = f1
- }
}
for j, fd := range md.GetField() {
@@ -149,7 +139,7 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
}
if f.L1.IsProto3Optional {
- if f.Syntax() != protoreflect.Proto3 {
+ if !isProto3 {
return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
}
if f.Cardinality() != protoreflect.Optional {
@@ -162,26 +152,29 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
if f.IsWeak() && !flags.ProtoLegacy {
return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
}
- if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
+ if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
return errors.New("message field %q may only be weak for an optional message", f.FullName())
}
if f.IsPacked() && !isPackable(f) {
return errors.New("message field %q is not packable", f.FullName())
}
- if err := checkValidGroup(f); err != nil {
+ if err := checkValidGroup(file, f); err != nil {
return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
}
if err := checkValidMap(f); err != nil {
return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
}
- if f.Syntax() == protoreflect.Proto3 {
+ if isProto3 {
if f.Cardinality() == protoreflect.Required {
return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
}
- if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 {
- return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName())
+ if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
+ return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName())
}
}
+ if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
+ return errors.New("message field %q with implicit presence may only use open enums", f.FullName())
+ }
}
seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
for j := range md.GetOneofDecl() {
@@ -215,17 +208,17 @@ func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.Desc
if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
return err
}
- if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil {
return err
}
- if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil {
return err
}
}
return nil
}
-func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
+func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
for i, xd := range xds {
x := &xs[i]
// NOTE: Avoid using the IsValid method since extensions to MessageSet
@@ -267,13 +260,13 @@ func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.
if x.IsPacked() && !isPackable(x) {
return errors.New("extension field %q is not packable", x.FullName())
}
- if err := checkValidGroup(x); err != nil {
+ if err := checkValidGroup(f, x); err != nil {
return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
}
if md := x.Message(); md != nil && md.IsMapEntry() {
return errors.New("extension field %q cannot be a map entry", x.FullName())
}
- if x.Syntax() == protoreflect.Proto3 {
+ if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) {
switch x.ContainingMessage().FullName() {
case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
@@ -309,21 +302,25 @@ func isPackable(fd protoreflect.FieldDescriptor) bool {
// checkValidGroup reports whether fd is a valid group according to the same
// rules that protoc imposes.
-func checkValidGroup(fd protoreflect.FieldDescriptor) error {
+func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error {
md := fd.Message()
switch {
case fd.Kind() != protoreflect.GroupKind:
return nil
- case fd.Syntax() == protoreflect.Proto3:
+ case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3):
return errors.New("invalid under proto3 semantics")
case md == nil || md.IsPlaceholder():
return errors.New("message must be resolvable")
- case fd.FullName().Parent() != md.FullName().Parent():
- return errors.New("message and field must be declared in the same scope")
- case !unicode.IsUpper(rune(md.Name()[0])):
- return errors.New("message name must start with an uppercase")
- case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
- return errors.New("field name must be lowercased form of the message name")
+ }
+ if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) {
+ switch {
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case !unicode.IsUpper(rune(md.Name()[0])):
+ return errors.New("message name must start with an uppercase")
+ case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
+ return errors.New("field name must be lowercased form of the message name")
+ }
}
return nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 2a6b29d17..804830eda 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -17,11 +17,6 @@ import (
gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
)
-const (
- SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
- SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
-)
-
var defaults = &descriptorpb.FeatureSetDefaults{}
var defaultsCacheMu sync.Mutex
var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
@@ -67,18 +62,20 @@ func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
os.Exit(1)
}
- fs := defaults.GetDefaults()[0].GetFeatures()
+ fsed := defaults.GetDefaults()[0]
// Using a linear search for now.
// Editions are guaranteed to be sorted and thus we could use a binary search.
// Given that there are only a handful of editions (with one more per year)
// there is not much reason to use a binary search.
for _, def := range defaults.GetDefaults() {
if def.GetEdition() <= edpb {
- fs = def.GetFeatures()
+ fsed = def
} else {
break
}
}
+ fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet)
+ proto.Merge(fs, fsed.GetOverridableFeatures())
defaultsCache[ed] = fs
return fs
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 9d6e05420..a5de8d400 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -73,6 +73,16 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
p.Syntax = proto.String(file.Syntax().String())
}
+ if file.Syntax() == protoreflect.Editions {
+ desc := file
+ if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
+ desc = fileImportDesc.FileDescriptor
+ }
+
+ if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
+ p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
+ }
+ }
return p
}
@@ -153,6 +163,18 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi
if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
p.Proto3Optional = proto.Bool(true)
}
+ if field.Syntax() == protoreflect.Editions {
+ // Editions have no group keyword, this type is only set so that downstream users continue
+ // treating this as delimited encoding.
+ if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
+ }
+ // Editions have no required keyword, this label is only set so that downstream users continue
+ // treating it as required.
+ if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
+ p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+ }
+ }
if field.HasDefault() {
def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
if err != nil && field.DefaultEnumValue() != nil {
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 00b01fbd8..c85bfaa5b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -161,7 +161,7 @@ const (
// IsValid reports whether the syntax is valid.
func (s Syntax) IsValid() bool {
switch s {
- case Proto2, Proto3:
+ case Proto2, Proto3, Editions:
return true
default:
return false
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 7dcc2ff09..ea154eec4 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
case 21:
b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
+ case 22:
+ b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -483,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 3:
b = p.appendSingularField(b, "debug_redact", nil)
+ case 4:
+ b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -519,6 +523,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte {
return b
}
+func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "edition_introduced", nil)
+ case 2:
+ b = p.appendSingularField(b, "edition_deprecated", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecation_warning", nil)
+ case 4:
+ b = p.appendSingularField(b, "edition_removed", nil)
+ }
+ return b
+}
+
func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
if len(*p) == 0 {
return b
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 60ff62b4c..cd8fadbaf 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -510,7 +510,7 @@ type ExtensionType interface {
//
// ValueOf is more extensive than protoreflect.ValueOf for a given field's
// value as it has more type information available.
- ValueOf(interface{}) Value
+ ValueOf(any) Value
// InterfaceOf completely unwraps the Value to the underlying Go type.
// InterfaceOf panics if the input is nil or does not represent the
@@ -519,13 +519,13 @@ type ExtensionType interface {
//
// InterfaceOf is able to unwrap the Value further than Value.Interface
// as it has more type information available.
- InterfaceOf(Value) interface{}
+ InterfaceOf(Value) any
// IsValidValue reports whether the Value is valid to assign to the field.
IsValidValue(Value) bool
// IsValidInterface reports whether the input is valid to assign to the field.
- IsValidInterface(interface{}) bool
+ IsValidInterface(any) bool
}
// EnumDescriptor describes an enum and
@@ -544,6 +544,12 @@ type EnumDescriptor interface {
// ReservedRanges is a list of reserved ranges of enum numbers.
ReservedRanges() EnumRanges
+ // IsClosed reports whether this enum uses closed semantics.
+ // See https://protobuf.dev/programming-guides/enum/#definitions.
+ // Note: the Go protobuf implementation is not spec compliant and treats
+ // all enums as open enums.
+ IsClosed() bool
+
isEnumDescriptor
}
type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
index 7ced876f4..75f83a2af 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
@@ -32,11 +32,11 @@ const (
type value struct {
pragma.DoNotCompare // 0B
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface interface{} // 16B
+ typ valueType // 8B
+ num uint64 // 8B
+ str string // 16B
+ bin []byte // 24B
+ iface any // 16B
}
func valueOfString(v string) Value {
@@ -45,7 +45,7 @@ func valueOfString(v string) Value {
func valueOfBytes(v []byte) Value {
return Value{typ: bytesType, bin: v}
}
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
return Value{typ: ifaceType, iface: v}
}
@@ -55,6 +55,6 @@ func (v Value) getString() string {
func (v Value) getBytes() []byte {
return v.bin
}
-func (v Value) getIface() interface{} {
+func (v Value) getIface() any {
return v.iface
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 160309731..9fe83cef5 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -69,8 +69,8 @@ import (
// composite Value. Modifying an empty, read-only value panics.
type Value value
-// The protoreflect API uses a custom Value union type instead of interface{}
-// to keep the future open for performance optimizations. Using an interface{}
+// The protoreflect API uses a custom Value union type instead of any
+// to keep the future open for performance optimizations. Using an any
// always incurs an allocation for primitives (e.g., int64) since it needs to
// be boxed on the heap (as interfaces can only contain pointers natively).
// Instead, we represent the Value union as a flat struct that internally keeps
@@ -85,7 +85,7 @@ type Value value
// ValueOf returns a Value initialized with the concrete value stored in v.
// This panics if the type does not match one of the allowed types in the
// Value union.
-func ValueOf(v interface{}) Value {
+func ValueOf(v any) Value {
switch v := v.(type) {
case nil:
return Value{}
@@ -192,10 +192,10 @@ func (v Value) IsValid() bool {
return v.typ != nilType
}
-// Interface returns v as an interface{}.
+// Interface returns v as an any.
//
// Invariant: v == ValueOf(v).Interface()
-func (v Value) Interface() interface{} {
+func (v Value) Interface() any {
switch v.typ {
case nilType:
return nil
@@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool {
return Value(k).IsValid()
}
-// Interface returns k as an interface{}.
-func (k MapKey) Interface() interface{} {
+// Interface returns k as an any.
+func (k MapKey) Interface() any {
return Value(k).Interface()
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index b1fdbe3e8..7f3583ead 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -45,7 +45,7 @@ var (
// typeOf returns a pointer to the Go type information.
// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
return (*ifaceHeader)(unsafe.Pointer(&t)).Type
}
@@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value {
p := (*sliceHeader)(unsafe.Pointer(&v))
return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
}
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
p := (*ifaceHeader)(unsafe.Pointer(&v))
return Value{typ: p.Type, ptr: p.Data}
}
@@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) {
*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
return x
}
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
return x
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index 435470111..f7d386990 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -15,7 +15,7 @@ import (
type (
ifaceHeader struct {
- _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
Type unsafe.Pointer
Data unsafe.Pointer
}
@@ -37,7 +37,7 @@ var (
// typeOf returns a pointer to the Go type information.
// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
+func typeOf(t any) unsafe.Pointer {
return (*ifaceHeader)(unsafe.Pointer(&t)).Type
}
@@ -70,7 +70,7 @@ func valueOfString(v string) Value {
func valueOfBytes(v []byte) Value {
return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
}
-func valueOfIface(v interface{}) Value {
+func valueOfIface(v any) Value {
p := (*ifaceHeader)(unsafe.Pointer(&v))
return Value{typ: p.Type, ptr: p.Data}
}
@@ -81,7 +81,7 @@ func (v Value) getString() string {
func (v Value) getBytes() []byte {
return unsafe.Slice((*byte)(v.ptr), v.num)
}
-func (v Value) getIface() (x interface{}) {
+func (v Value) getIface() (x any) {
*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
return x
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc52a..de1777339 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -95,7 +95,7 @@ type Files struct {
// multiple files. Only top-level declarations are registered.
// Note that enum values are in the top-level since that are in the same
// scope as the parent enum.
- descsByName map[protoreflect.FullName]interface{}
+ descsByName map[protoreflect.FullName]any
filesByPath map[string][]protoreflect.FileDescriptor
numFiles int
}
@@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error {
defer globalMutex.Unlock()
}
if r.descsByName == nil {
- r.descsByName = map[protoreflect.FullName]interface{}{
+ r.descsByName = map[protoreflect.FullName]any{
"": &packageDescriptor{},
}
r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
@@ -485,7 +485,7 @@ type Types struct {
}
type (
- typesByName map[protoreflect.FullName]interface{}
+ typesByName map[protoreflect.FullName]any
extensionsByMessage map[protoreflect.FullName]extensionsByNumber
extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType
)
@@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error {
return nil
}
-func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
name := desc.FullName()
prev := r.typesByName[name]
if prev != nil {
@@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p
}
}
-func typeName(t interface{}) string {
+func typeName(t any) string {
switch t.(type) {
case protoreflect.EnumType:
return "enum"
@@ -854,7 +854,7 @@ func typeName(t interface{}) string {
}
}
-func amendErrorWithCaller(err error, prev, curr interface{}) error {
+func amendErrorWithCaller(err error, prev, curr any) error {
prevPkg := goPackage(prev)
currPkg := goPackage(curr)
if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
@@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error {
return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
}
-func goPackage(v interface{}) string {
+func goPackage(v any) string {
switch d := v.(type) {
case protoreflect.EnumType:
v = d.Descriptor()
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 78624cf60..9403eb075 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -54,6 +54,9 @@ type Edition int32
const (
// A placeholder for an unknown edition value.
Edition_EDITION_UNKNOWN Edition = 0
+ // A placeholder edition for specifying default behaviors *before* a feature
+ // was first introduced. This is effectively an "infinite past".
+ Edition_EDITION_LEGACY Edition = 900
// Legacy syntax "editions". These pre-date editions, but behave much like
// distinct editions. These can't be used to specify the edition of proto
// files, but feature definitions must supply proto2/proto3 defaults for
@@ -82,6 +85,7 @@ const (
var (
Edition_name = map[int32]string{
0: "EDITION_UNKNOWN",
+ 900: "EDITION_LEGACY",
998: "EDITION_PROTO2",
999: "EDITION_PROTO3",
1000: "EDITION_2023",
@@ -95,6 +99,7 @@ var (
}
Edition_value = map[string]int32{
"EDITION_UNKNOWN": 0,
+ "EDITION_LEGACY": 900,
"EDITION_PROTO2": 998,
"EDITION_PROTO3": 999,
"EDITION_2023": 1000,
@@ -2177,12 +2182,16 @@ type FileOptions struct {
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect on when used with the lite runtime.
+ // A proto2 file can set this to true to opt in to UTF-8 checking for Java,
+ // which will throw an exception if invalid UTF-8 is parsed from the wire or
+ // assigned to a string field.
+ //
+ // TODO: clarify exactly what kinds of field types this option
+ // applies to, and update these docs accordingly.
+ //
+ // Proto3 files already perform these checks. Setting the option explicitly to
+ // false has no effect: it cannot be used to opt proto3 files out of UTF-8
+ // checks.
JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
// Sets the Go package where structs generated from this .proto will be
@@ -2679,7 +2688,8 @@ type FieldOptions struct {
Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
// Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
+ FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2811,6 +2821,13 @@ func (x *FieldOptions) GetFeatures() *FeatureSet {
return nil
}
+func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
+ if x != nil {
+ return x.FeatureSupport
+ }
+ return nil
+}
+
func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2995,6 +3012,8 @@ type EnumValueOptions struct {
// out when using debug formats, e.g. when the field contains sensitive
// credentials.
DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+ // Information about the support window of a feature value.
+ FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -3058,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool {
return Default_EnumValueOptions_DebugRedact
}
+func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
+ if x != nil {
+ return x.FeatureSupport
+ }
+ return nil
+}
+
func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -3968,6 +3994,88 @@ func (x *FieldOptions_EditionDefault) GetValue() string {
return ""
}
+// Information about the support window of a feature.
+type FieldOptions_FeatureSupport struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The edition that this feature was first available in. In editions
+ // earlier than this one, the default assigned to EDITION_LEGACY will be
+ // used, and proto files will not be able to override it.
+ EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"`
+ // The edition this feature becomes deprecated in. Using this after this
+ // edition may trigger warnings.
+ EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"`
+ // The deprecation warning text if this feature is used after the edition it
+ // was marked deprecated in.
+ DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"`
+ // The edition this feature is no longer available in. In editions after
+ // this one, the last default assigned will be used, and proto files will
+ // not be able to override it.
+ EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
+}
+
+func (x *FieldOptions_FeatureSupport) Reset() {
+ *x = FieldOptions_FeatureSupport{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldOptions_FeatureSupport) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldOptions_FeatureSupport) ProtoMessage() {}
+
+func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead.
+func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1}
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition {
+ if x != nil && x.EditionIntroduced != nil {
+ return *x.EditionIntroduced
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition {
+ if x != nil && x.EditionDeprecated != nil {
+ return *x.EditionDeprecated
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string {
+ if x != nil && x.DeprecationWarning != nil {
+ return *x.DeprecationWarning
+ }
+ return ""
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
+ if x != nil && x.EditionRemoved != nil {
+ return *x.EditionRemoved
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
@@ -3985,7 +4093,7 @@ type UninterpretedOption_NamePart struct {
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3998,7 +4106,7 @@ func (x *UninterpretedOption_NamePart) String() string {
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4037,14 +4145,17 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ // Defaults of features that can be overridden in this edition.
+ OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
+ // Defaults of features that can't be overridden in this edition.
+ FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4057,7 +4168,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4080,9 +4191,16 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition {
return Edition_EDITION_UNKNOWN
}
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet {
if x != nil {
- return x.Features
+ return x.OverridableFeatures
+ }
+ return nil
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet {
+ if x != nil {
+ return x.FixedFeatures
}
return nil
}
@@ -4188,7 +4306,7 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4201,7 +4319,7 @@ func (x *SourceCodeInfo_Location) String() string {
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4275,7 +4393,7 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4288,7 +4406,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4597,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
@@ -4670,405 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03,
- 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f,
- 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c,
- 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c,
- 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c,
- 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43,
- 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05,
- 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04,
- 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
- 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b,
- 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
- 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41,
- 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a,
- 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
- 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77,
- 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f,
- 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74,
- 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a,
- 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52,
- 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
- 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52,
- 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
- 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49,
- 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10,
- 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02,
- 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53,
- 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
- 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45,
- 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52,
- 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45,
- 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c,
- 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45,
- 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47,
- 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c,
- 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
- 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06,
- 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
- 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04,
- 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79,
- 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52,
- 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63,
- 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
- 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
+ 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70,
+ 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f,
+ 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64,
+ 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70,
+ 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61,
+ 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e,
+ 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37,
+ 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c,
- 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
+ 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04,
+ 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a,
+ 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16,
+ 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06,
+ 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53,
+ 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c,
+ 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01,
0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
- 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a,
+ 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+ 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f,
+ 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+ 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+ 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37,
- 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a,
+ 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12,
+ 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f,
+ 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e,
+ 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11,
+ 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
+ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69,
+ 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
+ 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
+ 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
+ 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
+ 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
+ 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
+ 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
+ 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
+ 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
+ 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
+ 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f,
+ 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
- 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
- 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
- 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
- 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
- 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43,
- 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
- 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
- 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69,
- 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65,
- 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52,
- 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f,
- 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52,
- 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a,
- 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e,
- 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42,
- 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
- 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49,
- 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
- 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75,
- 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+ 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
+ 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
+ 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02,
+ 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61,
+ 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
+ 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f,
+ 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
+ 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70,
+ 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08,
+ 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
+ 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65,
+ 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
+ 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d,
+ 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08,
+ 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e,
+ 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f,
+ 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10,
+ 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10,
+ 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a,
+ 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+ 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69,
+ 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10,
+ 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21,
+ 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61,
+ 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a,
+ 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
+ 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61,
+ 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01,
+ 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
+ 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c,
+ 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c,
+ 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09,
+ 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75,
+ 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01,
+ 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01,
+ 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07,
+ 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72,
+ 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e,
+ 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b,
- 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04,
- 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01,
- 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6,
- 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52,
- 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66,
- 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01,
- 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18,
- 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07,
- 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98,
- 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
- 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73,
- 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f,
- 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98,
- 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59,
- 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2,
- 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73,
- 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
- 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
- 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
- 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
- 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
- 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
- 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
- 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
- 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
- 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
- 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f,
- 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12,
- 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
- 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
- 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44,
- 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10,
- 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
- 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f,
- 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45,
- 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07,
- 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07,
- 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e,
- 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f,
- 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12,
- 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a,
- 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
- 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
- 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
- 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70,
- 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70,
- 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a,
- 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
- 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c,
- 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
- 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a,
- 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46,
- 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73,
- 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74,
+ 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42,
+ 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
+ 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50,
+ 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15,
+ 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63,
+ 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38,
+ 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98,
+ 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6,
+ 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2,
+ 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01,
+ 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47,
+ 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01,
+ 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
+ 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01,
+ 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53,
+ 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05,
+ 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a,
+ 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46,
+ 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49,
+ 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49,
+ 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45,
+ 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f,
+ 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10,
+ 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45,
+ 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43,
+ 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+ 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45,
+ 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66,
+ 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55,
+ 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49,
+ 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04,
+ 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
+ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41,
+ 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f,
+ 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45,
+ 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f,
+ 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f,
+ 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
+ 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c,
+ 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52,
+ 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e,
+ 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07,
+ 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+ 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f,
+ 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d,
- 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22,
- 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e,
- 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09,
- 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12,
- 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f,
- 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
- 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32,
- 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17,
- 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45,
- 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45,
- 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53,
- 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54,
- 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e,
- 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50,
- 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d,
+ 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d,
+ 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46,
+ 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f,
+ 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
+ 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61,
+ 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66,
+ 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+ 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a,
+ 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce,
+ 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70,
+ 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c,
+ 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f,
+ 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
+ 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64,
+ 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44,
+ 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22,
+ 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64,
+ 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67,
+ 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08,
+ 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61,
+ 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07,
+ 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53,
+ 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13,
+ 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+ 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c,
+ 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
+ 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
+ 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
+ 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
+ 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54,
+ 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
+ 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54,
+ 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
+ 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f,
+ 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
}
var (
@@ -5084,8 +5242,8 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
-var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_goTypes = []any{
(Edition)(0), // 0: google.protobuf.Edition
(ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
(FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
@@ -5131,10 +5289,11 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
(*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
(*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
(*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
- (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation
+ (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
@@ -5179,40 +5338,46 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{
8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
- 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
- 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
- 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
- 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
- 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
- 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 71, // [71:71] is the sub-list for method output_type
- 71, // [71:71] is the sub-list for method input_type
- 71, // [71:71] is the sub-list for extension type_name
- 71, // [71:71] is the sub-list for extension extendee
- 0, // [0:71] is the sub-list for field type_name
+ 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+ 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+ 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+ 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+ 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+ 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+ 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 77, // [77:77] is the sub-list for method output_type
+ 77, // [77:77] is the sub-list for method input_type
+ 77, // [77:77] is the sub-list for extension type_name
+ 77, // [77:77] is the sub-list for extension extendee
+ 0, // [0:77] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5221,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*FileDescriptorSet); i {
case 0:
return &v.state
@@ -5233,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*FileDescriptorProto); i {
case 0:
return &v.state
@@ -5245,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*DescriptorProto); i {
case 0:
return &v.state
@@ -5257,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ExtensionRangeOptions); i {
case 0:
return &v.state
@@ -5271,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*FieldDescriptorProto); i {
case 0:
return &v.state
@@ -5283,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*OneofDescriptorProto); i {
case 0:
return &v.state
@@ -5295,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*EnumDescriptorProto); i {
case 0:
return &v.state
@@ -5307,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*EnumValueDescriptorProto); i {
case 0:
return &v.state
@@ -5319,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*ServiceDescriptorProto); i {
case 0:
return &v.state
@@ -5331,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*MethodDescriptorProto); i {
case 0:
return &v.state
@@ -5343,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*FileOptions); i {
case 0:
return &v.state
@@ -5357,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*MessageOptions); i {
case 0:
return &v.state
@@ -5371,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*FieldOptions); i {
case 0:
return &v.state
@@ -5385,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*OneofOptions); i {
case 0:
return &v.state
@@ -5399,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*EnumOptions); i {
case 0:
return &v.state
@@ -5413,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*EnumValueOptions); i {
case 0:
return &v.state
@@ -5427,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*ServiceOptions); i {
case 0:
return &v.state
@@ -5441,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*MethodOptions); i {
case 0:
return &v.state
@@ -5455,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*UninterpretedOption); i {
case 0:
return &v.state
@@ -5467,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*FeatureSet); i {
case 0:
return &v.state
@@ -5481,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*FeatureSetDefaults); i {
case 0:
return &v.state
@@ -5493,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*SourceCodeInfo); i {
case 0:
return &v.state
@@ -5505,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*GeneratedCodeInfo); i {
case 0:
return &v.state
@@ -5517,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*DescriptorProto_ExtensionRange); i {
case 0:
return &v.state
@@ -5529,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*DescriptorProto_ReservedRange); i {
case 0:
return &v.state
@@ -5541,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*ExtensionRangeOptions_Declaration); i {
case 0:
return &v.state
@@ -5553,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
case 0:
return &v.state
@@ -5565,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*FieldOptions_EditionDefault); i {
case 0:
return &v.state
@@ -5577,7 +5742,19 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
+ switch v := v.(*FieldOptions_FeatureSupport); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*UninterpretedOption_NamePart); i {
case 0:
return &v.state
@@ -5589,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
case 0:
return &v.state
@@ -5601,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*SourceCodeInfo_Location); i {
case 0:
return &v.state
@@ -5613,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() {
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*GeneratedCodeInfo_Annotation); i {
case 0:
return &v.state
@@ -5632,7 +5809,7 @@ func file_google_protobuf_descriptor_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
NumEnums: 17,
- NumMessages: 32,
+ NumMessages: 33,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
index 39b024b46..1ba1dfa5a 100644
--- a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -294,7 +294,7 @@ func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
case fd.IsMap():
return protoreflect.ValueOfMap(&dynamicMap{
desc: fd,
- mapv: make(map[interface{}]protoreflect.Value),
+ mapv: make(map[any]protoreflect.Value),
})
case fd.IsList():
return protoreflect.ValueOfList(&dynamicList{desc: fd})
@@ -450,7 +450,7 @@ func (x *dynamicList) IsValid() bool {
type dynamicMap struct {
desc protoreflect.FieldDescriptor
- mapv map[interface{}]protoreflect.Value
+ mapv map[any]protoreflect.Value
}
func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] }
@@ -634,11 +634,11 @@ func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value {
//
// The InterfaceOf and ValueOf methods of the extension type are defined as:
//
-// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value {
+// func (xt extensionType) ValueOf(iv any) protoreflect.Value {
// return protoreflect.ValueOf(iv)
// }
//
-// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} {
+// func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
// return v.Interface()
// }
//
@@ -658,7 +658,7 @@ func (xt extensionType) New() protoreflect.Value {
case xt.desc.IsMap():
return protoreflect.ValueOfMap(&dynamicMap{
desc: xt.desc,
- mapv: make(map[interface{}]protoreflect.Value),
+ mapv: make(map[any]protoreflect.Value),
})
case xt.desc.IsList():
return protoreflect.ValueOfList(&dynamicList{desc: xt.desc})
@@ -686,18 +686,18 @@ func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
return xt.desc
}
-func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value {
+func (xt extensionType) ValueOf(iv any) protoreflect.Value {
v := protoreflect.ValueOf(iv)
typecheck(xt.desc, v)
return v
}
-func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} {
+func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
typecheck(xt.desc, v)
return v.Interface()
}
-func (xt extensionType) IsValidInterface(iv interface{}) bool {
+func (xt extensionType) IsValidInterface(iv any) bool {
return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil
}
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index 25de5ae00..a2ca940c5 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -6,9 +6,9 @@
// https://developers.google.com/open-source/licenses/bsd
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: reflect/protodesc/proto/go_features.proto
+// source: google/protobuf/go_features.proto
-package proto
+package gofeaturespb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -30,7 +30,7 @@ type GoFeatures struct {
func (x *GoFeatures) Reset() {
*x = GoFeatures{}
if protoimpl.UnsafeEnabled {
- mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -43,7 +43,7 @@ func (x *GoFeatures) String() string {
func (*GoFeatures) ProtoMessage() {}
func (x *GoFeatures) ProtoReflect() protoreflect.Message {
- mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -56,7 +56,7 @@ func (x *GoFeatures) ProtoReflect() protoreflect.Message {
// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead.
func (*GoFeatures) Descriptor() ([]byte, []int) {
- return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0}
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0}
}
func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
@@ -66,69 +66,73 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
return false
}
-var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
+var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FeatureSet)(nil),
ExtensionType: (*GoFeatures)(nil),
Field: 1002,
- Name: "google.protobuf.go",
+ Name: "pb.go",
Tag: "bytes,1002,opt,name=go",
- Filename: "reflect/protodesc/proto/go_features.proto",
+ Filename: "google/protobuf/go_features.proto",
},
}
// Extension fields to descriptorpb.FeatureSet.
var (
- // optional google.protobuf.GoFeatures go = 1002;
- E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0]
+ // optional pb.GoFeatures go = 1002;
+ E_Go = &file_google_protobuf_go_features_proto_extTypes[0]
)
-var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor
-
-var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{
- 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64,
- 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a,
- 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c,
- 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75,
- 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7,
- 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68,
- 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f,
- 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_go_features_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
+ 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
+ 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
+ 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
+ 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
+ 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
+ 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
+ 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
+ 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
+ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
+ 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
+ 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12,
+ 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
}
var (
- file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once
- file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc
+ file_google_protobuf_go_features_proto_rawDescOnce sync.Once
+ file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc
)
-func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte {
- file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() {
- file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData)
+func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
+ file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData)
})
- return file_reflect_protodesc_proto_go_features_proto_rawDescData
+ return file_google_protobuf_go_features_proto_rawDescData
}
-var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{
- (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures
+var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_go_features_proto_goTypes = []any{
+ (*GoFeatures)(nil), // 0: pb.GoFeatures
(*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
}
-var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{
- 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet
- 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures
+var file_google_protobuf_go_features_proto_depIdxs = []int32{
+ 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet
+ 0, // 1: pb.go:type_name -> pb.GoFeatures
2, // [2:2] is the sub-list for method output_type
2, // [2:2] is the sub-list for method input_type
1, // [1:2] is the sub-list for extension type_name
@@ -136,13 +140,13 @@ var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for field type_name
}
-func init() { file_reflect_protodesc_proto_go_features_proto_init() }
-func file_reflect_protodesc_proto_go_features_proto_init() {
- if File_reflect_protodesc_proto_go_features_proto != nil {
+func init() { file_google_protobuf_go_features_proto_init() }
+func file_google_protobuf_go_features_proto_init() {
+ if File_google_protobuf_go_features_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
- file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GoFeatures); i {
case 0:
return &v.state
@@ -159,19 +163,19 @@ func file_reflect_protodesc_proto_go_features_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc,
+ RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 1,
NumServices: 0,
},
- GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes,
- DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs,
- MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes,
- ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes,
+ GoTypes: file_google_protobuf_go_features_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
+ MessageInfos: file_google_protobuf_go_features_proto_msgTypes,
+ ExtensionInfos: file_google_protobuf_go_features_proto_extTypes,
}.Build()
- File_reflect_protodesc_proto_go_features_proto = out.File
- file_reflect_protodesc_proto_go_features_proto_rawDesc = nil
- file_reflect_protodesc_proto_go_features_proto_goTypes = nil
- file_reflect_protodesc_proto_go_features_proto_depIdxs = nil
+ File_google_protobuf_go_features_proto = out.File
+ file_google_protobuf_go_features_proto_rawDesc = nil
+ file_google_protobuf_go_features_proto_goTypes = nil
+ file_google_protobuf_go_features_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
deleted file mode 100644
index d24657129..000000000
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
+++ /dev/null
@@ -1,28 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2023 Google Inc. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd
-
-syntax = "proto2";
-
-package google.protobuf;
-
-import "google/protobuf/descriptor.proto";
-
-option go_package = "google.golang.org/protobuf/types/gofeaturespb";
-
-extend google.protobuf.FeatureSet {
- optional GoFeatures go = 1002;
-}
-
-message GoFeatures {
- // Whether or not to generate the deprecated UnmarshalJSON method for enums.
- optional bool legacy_unmarshal_json_enum = 1 [
- retention = RETENTION_RUNTIME,
- targets = TARGET_TYPE_ENUM,
- edition_defaults = { edition: EDITION_PROTO2, value: "true" },
- edition_defaults = { edition: EDITION_PROTO3, value: "false" }
- ];
-}
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 9de51be54..7172b43d3 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_any_proto_goTypes = []interface{}{
+var file_google_protobuf_any_proto_goTypes = []any{
(*Any)(nil), // 0: google.protobuf.Any
}
var file_google_protobuf_any_proto_depIdxs = []int32{
@@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Any); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index df709a8dd..1b71bcd91 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_duration_proto_goTypes = []interface{}{
+var file_google_protobuf_duration_proto_goTypes = []any{
(*Duration)(nil), // 0: google.protobuf.Duration
}
var file_google_protobuf_duration_proto_depIdxs = []int32{
@@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Duration); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index 9a7277ba3..d87b4fb82 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_empty_proto_goTypes = []interface{}{
+var file_google_protobuf_empty_proto_goTypes = []any{
(*Empty)(nil), // 0: google.protobuf.Empty
}
var file_google_protobuf_empty_proto_depIdxs = []int32{
@@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Empty); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index e8789cb33..ac1e91bb6 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
+var file_google_protobuf_field_mask_proto_goTypes = []any{
(*FieldMask)(nil), // 0: google.protobuf.FieldMask
}
var file_google_protobuf_field_mask_proto_depIdxs = []int32{
@@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*FieldMask); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d2bac8b88..d45361cbc 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -49,11 +49,11 @@
// The standard Go "encoding/json" package has functionality to serialize
// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
// ListValue.AsSlice methods can convert the protobuf message representation into
-// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// a form represented by any, map[string]any, and []any.
// This form can be used with other packages that operate on such data structures
// and also directly with the standard json package.
//
-// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// In order to convert the any, map[string]any, and []any
// forms back as Value, Struct, and ListValue messages, use the NewStruct,
// NewList, and NewValue constructor functions.
//
@@ -88,28 +88,28 @@
//
// To construct a Value message representing the above JSON object:
//
-// m, err := structpb.NewValue(map[string]interface{}{
+// m, err := structpb.NewValue(map[string]any{
// "firstName": "John",
// "lastName": "Smith",
// "isAlive": true,
// "age": 27,
-// "address": map[string]interface{}{
+// "address": map[string]any{
// "streetAddress": "21 2nd Street",
// "city": "New York",
// "state": "NY",
// "postalCode": "10021-3100",
// },
-// "phoneNumbers": []interface{}{
-// map[string]interface{}{
+// "phoneNumbers": []any{
+// map[string]any{
// "type": "home",
// "number": "212 555-1234",
// },
-// map[string]interface{}{
+// map[string]any{
// "type": "office",
// "number": "646 555-4567",
// },
// },
-// "children": []interface{}{},
+// "children": []any{},
// "spouse": nil,
// })
// if err != nil {
@@ -197,7 +197,7 @@ type Struct struct {
// NewStruct constructs a Struct from a general-purpose Go map.
// The map keys must be valid UTF-8.
// The map values are converted using NewValue.
-func NewStruct(v map[string]interface{}) (*Struct, error) {
+func NewStruct(v map[string]any) (*Struct, error) {
x := &Struct{Fields: make(map[string]*Value, len(v))}
for k, v := range v {
if !utf8.ValidString(k) {
@@ -214,9 +214,9 @@ func NewStruct(v map[string]interface{}) (*Struct, error) {
// AsMap converts x to a general-purpose Go map.
// The map values are converted by calling Value.AsInterface.
-func (x *Struct) AsMap() map[string]interface{} {
+func (x *Struct) AsMap() map[string]any {
f := x.GetFields()
- vs := make(map[string]interface{}, len(f))
+ vs := make(map[string]any, len(f))
for k, v := range f {
vs[k] = v.AsInterface()
}
@@ -306,13 +306,13 @@ type Value struct {
// ║ float32, float64 │ stored as NumberValue ║
// ║ string │ stored as StringValue; must be valid UTF-8 ║
// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]interface{} │ stored as StructValue ║
-// ║ []interface{} │ stored as ListValue ║
+// ║ map[string]any │ stored as StructValue ║
+// ║ []any │ stored as ListValue ║
// ╚════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
-func NewValue(v interface{}) (*Value, error) {
+func NewValue(v any) (*Value, error) {
switch v := v.(type) {
case nil:
return NewNullValue(), nil
@@ -342,13 +342,13 @@ func NewValue(v interface{}) (*Value, error) {
case []byte:
s := base64.StdEncoding.EncodeToString(v)
return NewStringValue(s), nil
- case map[string]interface{}:
+ case map[string]any:
v2, err := NewStruct(v)
if err != nil {
return nil, err
}
return NewStructValue(v2), nil
- case []interface{}:
+ case []any:
v2, err := NewList(v)
if err != nil {
return nil, err
@@ -396,7 +396,7 @@ func NewListValue(v *ListValue) *Value {
//
// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
// converted as strings to remain compatible with MarshalJSON.
-func (x *Value) AsInterface() interface{} {
+func (x *Value) AsInterface() any {
switch v := x.GetKind().(type) {
case *Value_NumberValue:
if v != nil {
@@ -580,7 +580,7 @@ type ListValue struct {
// NewList constructs a ListValue from a general-purpose Go slice.
// The slice elements are converted using NewValue.
-func NewList(v []interface{}) (*ListValue, error) {
+func NewList(v []any) (*ListValue, error) {
x := &ListValue{Values: make([]*Value, len(v))}
for i, v := range v {
var err error
@@ -594,9 +594,9 @@ func NewList(v []interface{}) (*ListValue, error) {
// AsSlice converts x to a general-purpose Go slice.
// The slice elements are converted by calling Value.AsInterface.
-func (x *ListValue) AsSlice() []interface{} {
+func (x *ListValue) AsSlice() []any {
vals := x.GetValues()
- vs := make([]interface{}, len(vals))
+ vs := make([]any, len(vals))
for i, v := range vals {
vs[i] = v.AsInterface()
}
@@ -716,7 +716,7 @@ func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_protobuf_struct_proto_goTypes = []interface{}{
+var file_google_protobuf_struct_proto_goTypes = []any{
(NullValue)(0), // 0: google.protobuf.NullValue
(*Struct)(nil), // 1: google.protobuf.Struct
(*Value)(nil), // 2: google.protobuf.Value
@@ -743,7 +743,7 @@ func file_google_protobuf_struct_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Struct); i {
case 0:
return &v.state
@@ -755,7 +755,7 @@ func file_google_protobuf_struct_proto_init() {
return nil
}
}
- file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Value); i {
case 0:
return &v.state
@@ -767,7 +767,7 @@ func file_google_protobuf_struct_proto_init() {
return nil
}
}
- file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ListValue); i {
case 0:
return &v.state
@@ -780,7 +780,7 @@ func file_google_protobuf_struct_proto_init() {
}
}
}
- file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
(*Value_StringValue)(nil),
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 81511a336..83a5a645b 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+var file_google_protobuf_timestamp_proto_goTypes = []any{
(*Timestamp)(nil), // 0: google.protobuf.Timestamp
}
var file_google_protobuf_timestamp_proto_depIdxs = []int32{
@@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Timestamp); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 762a87130..e473f826a 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -605,7 +605,7 @@ func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
}
var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+var file_google_protobuf_wrappers_proto_goTypes = []any{
(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
(*FloatValue)(nil), // 1: google.protobuf.FloatValue
(*Int64Value)(nil), // 2: google.protobuf.Int64Value
@@ -630,7 +630,7 @@ func file_google_protobuf_wrappers_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*DoubleValue); i {
case 0:
return &v.state
@@ -642,7 +642,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*FloatValue); i {
case 0:
return &v.state
@@ -654,7 +654,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Int64Value); i {
case 0:
return &v.state
@@ -666,7 +666,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*UInt64Value); i {
case 0:
return &v.state
@@ -678,7 +678,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Int32Value); i {
case 0:
return &v.state
@@ -690,7 +690,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*UInt32Value); i {
case 0:
return &v.state
@@ -702,7 +702,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*BoolValue); i {
case 0:
return &v.state
@@ -714,7 +714,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*StringValue); i {
case 0:
return &v.state
@@ -726,7 +726,7 @@ func file_google_protobuf_wrappers_proto_init() {
return nil
}
}
- file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*BytesValue); i {
case 0:
return &v.state
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_service_types.go b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_service_types.go
index 20435663a..807febf7f 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_service_types.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_service_types.go
@@ -18,6 +18,7 @@ package v1alpha1
import (
kmapi "kmodules.xyz/client-go/api/v1"
+ ofst "kmodules.xyz/offshoot-api/api/v1"
"kmodules.xyz/resource-metadata/apis/shared"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -57,6 +58,8 @@ type GenericResourceServiceFacilities struct {
Backup GenericResourceServiceFacilitator `json:"backup,omitempty"`
Monitoring GenericResourceServiceFacilitator `json:"monitoring,omitempty"`
Exec []ExecServiceFacilitator `json:"exec,omitempty"`
+ // +optional
+ Gateway *ofst.Gateway `json:"gateway,omitempty"`
}
type GenericResourceServiceFacilitator struct {
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_types.go b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_types.go
index 65d53a196..20e67c58f 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_types.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/generic_resource_types.go
@@ -68,7 +68,38 @@ type GenericResourceSpec struct {
RoleResourceLimits map[api.PodRole]core.ResourceList `json:"roleResourceLimits,omitempty"`
// +optional
RoleResourceRequests map[api.PodRole]core.ResourceList `json:"roleResourceRequests,omitempty"`
- Status GenericResourceStatus `json:"status"`
+
+ Pods []ComputeResource `json:"pods,omitempty"`
+ Storage []StorageResource `json:"storage,omitempty"`
+
+ Status GenericResourceStatus `json:"status"`
+}
+
+type ComputeResource struct {
+ // +optional
+ UID types.UID `json:"uid,omitempty"`
+ Name string `json:"name"`
+ // +optional
+ CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty"`
+ Containers []ContainerResource `json:"containers,omitempty"`
+ InitContainers []ContainerResource `json:"initContainers,omitempty"`
+}
+
+type ContainerResource struct {
+ Name string `json:"name"`
+ // +optional
+ Resource core.ResourceRequirements `json:"resource"`
+ RestartPolicy *core.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
+}
+
+type StorageResource struct {
+ // +optional
+ UID types.UID `json:"uid,omitempty"`
+ Name string `json:"name"`
+ // +optional
+ CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty"`
+ // +optional
+ Resources core.VolumeResourceRequirements `json:"resources,omitempty"`
}
type GenericResourceStatus struct {
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/openapi_generated.go b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/openapi_generated.go
index fd50b147a..7166478f9 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/openapi_generated.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/openapi_generated.go
@@ -317,6 +317,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"kmodules.xyz/client-go/api/v1.CAPIClusterInfo": schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref),
"kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref),
"kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterClaimFeatures": schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterClaimInfo": schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterInfo": schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref),
"kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref),
"kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref),
"kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref),
@@ -330,9 +333,33 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref),
"kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref),
"kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref),
+ "kmodules.xyz/client-go/api/v1.TypeReference": schema_kmodulesxyz_client_go_api_v1_TypeReference(ref),
"kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref),
"kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref),
"kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref),
+ "kmodules.xyz/offshoot-api/api/v1.Gateway": schema_kmodulesxyz_offshoot_api_api_v1_Gateway(ref),
+ "kmodules.xyz/offshoot-api/api/v1.GatewayPort": schema_kmodulesxyz_offshoot_api_api_v1_GatewayPort(ref),
+ "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus": schema_kmodulesxyz_offshoot_api_api_v1_NamedServiceStatus(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NamedURL": schema_kmodulesxyz_offshoot_api_api_v1_NamedURL(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.RuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref),
+ "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref),
+ "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ComputeResource": schema_resource_metadata_apis_core_v1alpha1_ComputeResource(ref),
+ "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ContainerResource": schema_resource_metadata_apis_core_v1alpha1_ContainerResource(ref),
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.ContainerView": schema_resource_metadata_apis_core_v1alpha1_ContainerView(ref),
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.ControlPlaneInfo": schema_resource_metadata_apis_core_v1alpha1_ControlPlaneInfo(ref),
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.ExecServiceFacilitator": schema_resource_metadata_apis_core_v1alpha1_ExecServiceFacilitator(ref),
@@ -359,6 +386,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.ResourceSummarySpec": schema_resource_metadata_apis_core_v1alpha1_ResourceSummarySpec(ref),
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.ResourceView": schema_resource_metadata_apis_core_v1alpha1_ResourceView(ref),
"kmodules.xyz/resource-metadata/apis/core/v1alpha1.Service": schema_resource_metadata_apis_core_v1alpha1_Service(ref),
+ "kmodules.xyz/resource-metadata/apis/core/v1alpha1.StorageResource": schema_resource_metadata_apis_core_v1alpha1_StorageResource(ref),
"kmodules.xyz/resource-metadata/apis/shared.Action": schema_kmodulesxyz_resource_metadata_apis_shared_Action(ref),
"kmodules.xyz/resource-metadata/apis/shared.ActionGroup": schema_kmodulesxyz_resource_metadata_apis_shared_ActionGroup(ref),
"kmodules.xyz/resource-metadata/apis/shared.ActionInfo": schema_kmodulesxyz_resource_metadata_apis_shared_ActionInfo(ref),
@@ -16127,23 +16155,27 @@ func schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref common.ReferenceCal
Properties: map[string]spec.Schema{
"provider": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"clusterName": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
},
+ Required: []string{"provider", "namespace", "clusterName"},
},
},
}
@@ -16210,7 +16242,7 @@ func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCal
},
"renewBefore": {
SchemaProps: spec.SchemaProps{
- Description: "Certificate renew before expiration duration",
+ Description: "Certificate renew before expiration duration\n\nDeprecated use `ReconfigureTLS` type OpsRequest instead.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
@@ -16289,6 +16321,130 @@ func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCal
}
}
+func schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "enabledFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "externallyManagedFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "disabledFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "clusterMetadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.ClusterInfo"),
+ },
+ },
+ },
+ Required: []string{"clusterMetadata"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.ClusterInfo"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ClusterInfo used in ace-installer",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "uid": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "clusterManagers": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "capi": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/client-go/api/v1.CAPIClusterInfo"),
+ },
+ },
+ },
+ Required: []string{"uid", "name", "clusterManagers"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.CAPIClusterInfo"},
+ }
+}
+
func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -16344,6 +16500,18 @@ func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCal
Format: "",
},
},
+ "managerID": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hubClusterID": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
Required: []string{"uid"},
},
@@ -16799,179 +16967,2001 @@ func schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref common.ReferenceCallback)
}
}
+func schema_kmodulesxyz_client_go_api_v1_TypeReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TypeReference represents an object type.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "apiGroup": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
func schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "TypedObjectReference represents an typed namespaced object.",
+ Description: "TypedObjectReference represents a typed namespaced object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"apiGroup": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "X509Subject Full X509 name specification",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "organizations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizations to be used on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "countries": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Countries to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "organizationalUnits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizational Units to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "localities": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Cities to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "provinces": {
+ SchemaProps: spec.SchemaProps{
+ Description: "State/Provinces to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "streetAddresses": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Street addresses to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "postalCodes": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Postal codes to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serialNumber": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Serial number to be used on the CertificateSpec.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compute Resources required by container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "livenessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "readinessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "lifecycle": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ Ref: ref("k8s.io/api/core/v1.Lifecycle"),
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.SecurityContext"),
+ },
+ },
+ "nice": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Settings to configure `nice` to throttle the load on cpu. More info: http://kennystechtalk.blogspot.com/2015/04/throttling-cpu-usage-with-linux-cgroups.html More info: https://oakbytes.wordpress.com/2012/06/06/linux-scheduler-cfs-and-nice/",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NiceSettings"),
+ },
+ },
+ "ionice": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Settings to configure `ionice` to throttle the load on disk. More info: http://kennystechtalk.blogspot.com/2015/04/throttling-cpu-usage-with-linux-cgroups.html More info: https://oakbytes.wordpress.com/2012/06/06/linux-scheduler-cfs-and-nice/",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.IONiceSettings"),
+ },
+ },
+ "envFrom": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
+ },
+ },
+ },
+ },
+ },
+ "env": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "kmodules.xyz/offshoot-api/api/v1.IONiceSettings", "kmodules.xyz/offshoot-api/api/v1.NiceSettings"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Represents an ephemeral volume that is handled by a normal storage driver.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "volumeClaimTemplate": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_Gateway(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ip": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostname": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "services": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Services is an optional configuration for services used to expose the database",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus"),
+ },
+ },
+ },
+ },
+ },
+ "ui": {
+ SchemaProps: spec.SchemaProps{
+ Description: "UI is an optional list of database web UIs",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NamedURL"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"name", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus", "kmodules.xyz/offshoot-api/api/v1.NamedURL"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_GatewayPort(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "GatewayPort contains information on Gateway service's port.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The name of this port within the gateway service.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port that will be exposed by the gateway service.",
+ Default: 0,
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "backendServicePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number of the port to access the backend service.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "nodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port on each node on which this gateway service is exposed when type is NodePort or LoadBalancer.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ Required: []string{"port"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "https://linux.die.net/man/1/ionice",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "class": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "classData": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NamedServiceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "alias": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Alias represents the identifier of the service.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ports": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.GatewayPort"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"alias", "ports"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.GatewayPort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NamedURL(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "alias": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Alias represents the identifier of the service. This should match the db ui chart name.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL of the database ui",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.GatewayPort"),
+ },
+ },
+ "helmRelease": {
+ SchemaProps: spec.SchemaProps{
+ Description: "HelmRelease is the name of the helm release used to deploy this ui. The name format is typically -",
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ Required: []string{"alias", "url", "port"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.LocalObjectReference", "kmodules.xyz/offshoot-api/api/v1.GatewayPort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "https://linux.die.net/man/1/nice",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "adjustment": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "generateName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "ownerReferences": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "uid",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimStatus"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.PersistentVolumeClaimStatus", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
+ },
+ },
+ },
+ Required: []string{"spec"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "podLabels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodLabels are the labels that will be attached to the respective Pod",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "podAnnotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodAnnotations are the annotations that will be attached to the respective Pod",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "nodeSelector": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "serviceAccountAnnotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountAnnotations are the annotations that will be attached to the respective ServiceAccount",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "automountServiceAccountToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "nodeName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "imagePullSecrets": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodRuntimeSettings. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's scheduling constraints",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's tolerations.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "readinessGates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PodReadinessGate"),
+ },
+ },
+ },
+ },
+ },
+ "runtimeClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enableServiceLinks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "topologySpreadConstraints": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "topologyKey",
+ "whenUnsatisfiable",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "topologyKey",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "volumes": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge,retainKeys",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"),
+ },
+ },
+ },
+ },
+ },
+ "initContainers": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Container"),
+ },
+ },
+ },
+ },
+ },
+ "terminationGracePeriodSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "dnsPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"},
+ },
+ },
+ "nodeSelector": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-map-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostNetwork": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "hostPID": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use the host's pid namespace. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "hostIPC": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use the host's ipc namespace. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "shareProcessNamespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "imagePullSecrets": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's scheduling constraints",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's tolerations.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "dnsConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+ Ref: ref("k8s.io/api/core/v1.PodDNSConfig"),
+ },
+ },
+ "runtimeClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enableServiceLinks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "topologySpreadConstraints": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "topologyKey",
+ "whenUnsatisfiable",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "topologyKey",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
+ },
+ },
+ },
+ },
+ },
+ "args": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compute Resources required by the sidecar container.",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "livenessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Controllers may set default LivenessProbe if no liveness probe is provided. To ignore defaulting, set the value to empty LivenessProbe \"{}\". Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "readinessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. Controllers may set default ReadinessProbe if no readiness probe is provided. To ignore defaulting, set the value to empty ReadinessProbe \"{}\". More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "lifecycle": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ Ref: ref("k8s.io/api/core/v1.Lifecycle"),
+ },
+ },
+ "containerSecurityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the container should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.SecurityContext"),
+ },
+ },
+ "volumeMounts": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "mountPath",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.VolumeMount"),
+ },
+ },
+ },
+ },
+ },
+ "podPlacementPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodPlacementPolicy is a reference to the podPlacementPolicy",
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PodTemplateSpec describes the data a pod should have when created from a template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "controller": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Workload controller's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PodSpec"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta", "kmodules.xyz/offshoot-api/api/v1.PodSpec"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "pod": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings"),
+ },
+ },
+ "container": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings", "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServicePort contains information on service's port.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port that will be exposed by this service.",
+ Default: 0,
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "nodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ Required: []string{"port"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceSpec describes the attributes that a user creates on a service.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "ports": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "port",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ServicePort"),
+ },
+ },
+ },
+ },
+ },
+ "clusterIP": {
+ SchemaProps: spec.SchemaProps{
+ Description: "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "type": {
+ SchemaProps: spec.SchemaProps{
+ Description: "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types\n\nPossible enum values:\n - `\"ClusterIP\"` means a service will only be accessible inside the cluster, via the cluster IP.\n - `\"ExternalName\"` means a service consists of only a reference to an external name that kubedns or equivalent will return as a CNAME record, with no exposing or proxying of any pods involved.\n - `\"LoadBalancer\"` means a service will be exposed via an external load balancer (if the cloud provider supports it), in addition to 'NodePort' type.\n - `\"NodePort\"` means a service will be exposed on one port of every node, in addition to 'ClusterIP' type.",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"ClusterIP", "ExternalName", "LoadBalancer", "NodePort"},
+ },
+ },
+ "externalIPs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "loadBalancerIP": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "loadBalancerSourceRanges": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "externalTrafficPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.\n\nPossible enum values:\n - `\"Cluster\"` routes traffic to all endpoints.\n - `\"Local\"` preserves the source IP of the traffic by routing only to endpoints on the same node as the traffic was received on (dropping the traffic if there are no local endpoints).",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"Cluster", "Local"},
+ },
+ },
+ "healthCheckNodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only takes effect when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "sessionAffinityConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "sessionAffinityConfig contains the configurations of session affinity.",
+ Ref: ref("k8s.io/api/core/v1.SessionAffinityConfig"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.SessionAffinityConfig", "kmodules.xyz/offshoot-api/api/v1.ServicePort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceTemplateSpec describes the data a service should have when created from a template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specification of the desired behavior of the service. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ServiceSpec"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta", "kmodules.xyz/offshoot-api/api/v1.ServiceSpec"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostPath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
+ },
+ },
+ "emptyDir": {
+ SchemaProps: spec.SchemaProps{
+ Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
+ },
+ },
+ "gcePersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
+ },
+ },
+ "awsElasticBlockStore": {
+ SchemaProps: spec.SchemaProps{
+ Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
+ },
+ },
+ "secret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
+ },
+ },
+ "nfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
+ },
+ },
+ "iscsi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+ Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
+ },
+ },
+ "glusterfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
+ },
+ },
+ "persistentVolumeClaim": {
+ SchemaProps: spec.SchemaProps{
+ Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
+ },
+ },
+ "rbd": {
+ SchemaProps: spec.SchemaProps{
+ Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
+ },
+ },
+ "flexVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
+ },
+ },
+ "cinder": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
+ },
+ },
+ "cephfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
+ },
+ },
+ "flocker": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
+ },
+ },
+ "downwardAPI": {
+ SchemaProps: spec.SchemaProps{
+ Description: "downwardAPI represents downward API about the pod that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
+ },
+ },
+ "fc": {
+ SchemaProps: spec.SchemaProps{
+ Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
+ },
+ },
+ "azureFile": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
+ },
+ },
+ "configMap": {
+ SchemaProps: spec.SchemaProps{
+ Description: "configMap represents a configMap that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
+ },
+ },
+ "vsphereVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
+ },
+ },
+ "quobyte": {
+ SchemaProps: spec.SchemaProps{
+ Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
+ },
+ },
+ "azureDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
+ },
+ },
+ "photonPersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
+ },
+ },
+ "projected": {
+ SchemaProps: spec.SchemaProps{
+ Description: "projected items for all in one resources secrets, configmaps, and downward API",
+ Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
+ },
+ },
+ "portworxVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
+ },
+ },
+ "scaleIO": {
+ SchemaProps: spec.SchemaProps{
+ Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
+ },
+ },
+ "storageos": {
+ SchemaProps: spec.SchemaProps{
+ Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
+ },
+ },
+ "csi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
+ Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
+ },
+ },
+ "ephemeral": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"),
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Represents the source of a volume to mount. Only one of its members may be specified.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "hostPath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
+ },
+ },
+ "emptyDir": {
+ SchemaProps: spec.SchemaProps{
+ Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
+ },
+ },
+ "gcePersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
+ },
+ },
+ "awsElasticBlockStore": {
+ SchemaProps: spec.SchemaProps{
+ Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
+ },
+ },
+ "secret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
+ },
+ },
+ "nfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
+ },
+ },
+ "iscsi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+ Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
+ },
+ },
+ "glusterfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
+ },
+ },
+ "persistentVolumeClaim": {
+ SchemaProps: spec.SchemaProps{
+ Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
+ },
+ },
+ "rbd": {
+ SchemaProps: spec.SchemaProps{
+ Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
+ },
+ },
+ "flexVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
+ },
+ },
+ "cinder": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
+ },
+ },
+ "cephfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
+ },
+ },
+ "flocker": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
+ },
+ },
+ "downwardAPI": {
+ SchemaProps: spec.SchemaProps{
+ Description: "downwardAPI represents downward API about the pod that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
+ },
+ },
+ "fc": {
+ SchemaProps: spec.SchemaProps{
+ Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
+ },
+ },
+ "azureFile": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
+ },
+ },
+ "configMap": {
+ SchemaProps: spec.SchemaProps{
+ Description: "configMap represents a configMap that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
+ },
+ },
+ "vsphereVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
+ },
+ },
+ "quobyte": {
+ SchemaProps: spec.SchemaProps{
+ Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
+ },
+ },
+ "azureDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
- "kind": {
+ "photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
- "namespace": {
+ "projected": {
SchemaProps: spec.SchemaProps{
- Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
- Type: []string{"string"},
- Format: "",
+ Description: "projected items for all in one resources secrets, configmaps, and downward API",
+ Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
},
},
- "name": {
+ "portworxVolume": {
SchemaProps: spec.SchemaProps{
- Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
- Default: "",
- Type: []string{"string"},
- Format: "",
+ Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
+ },
+ },
+ "scaleIO": {
+ SchemaProps: spec.SchemaProps{
+ Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
+ },
+ },
+ "storageos": {
+ SchemaProps: spec.SchemaProps{
+ Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
+ },
+ },
+ "csi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
+ Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
+ },
+ },
+ "ephemeral": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"),
},
},
},
- Required: []string{"name"},
},
},
+ Dependencies: []string{
+ "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"},
}
}
-func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_resource_metadata_apis_core_v1alpha1_ComputeResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
- Description: "X509Subject Full X509 name specification",
- Type: []string{"object"},
+ Type: []string{"object"},
Properties: map[string]spec.Schema{
- "organizations": {
- SchemaProps: spec.SchemaProps{
- Description: "Organizations to be used on the Certificate.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "countries": {
- SchemaProps: spec.SchemaProps{
- Description: "Countries to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
- },
- },
- "organizationalUnits": {
+ "uid": {
SchemaProps: spec.SchemaProps{
- Description: "Organizational Units to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Type: []string{"string"},
+ Format: "",
},
},
- "localities": {
+ "name": {
SchemaProps: spec.SchemaProps{
- Description: "Cities to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
- "provinces": {
+ "creationTimestamp": {
SchemaProps: spec.SchemaProps{
- Description: "State/Provinces to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
- "streetAddresses": {
+ "containers": {
SchemaProps: spec.SchemaProps{
- Description: "Street addresses to be used on the CertificateSpec.",
- Type: []string{"array"},
+ Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/resource-metadata/apis/core/v1alpha1.ContainerResource"),
},
},
},
},
},
- "postalCodes": {
+ "initContainers": {
SchemaProps: spec.SchemaProps{
- Description: "Postal codes to be used on the CertificateSpec.",
- Type: []string{"array"},
+ Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/resource-metadata/apis/core/v1alpha1.ContainerResource"),
},
},
},
},
},
- "serialNumber": {
- SchemaProps: spec.SchemaProps{
- Description: "Serial number to be used on the CertificateSpec.",
- Type: []string{"string"},
- Format: "",
- },
- },
},
+ Required: []string{"name"},
},
},
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ContainerResource"},
}
}
-func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition {
+func schema_resource_metadata_apis_core_v1alpha1_ContainerResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "resource": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "restartPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
},
},
+ Dependencies: []string{
+ "k8s.io/api/core/v1.ResourceRequirements"},
}
}
@@ -17594,11 +19584,16 @@ func schema_resource_metadata_apis_core_v1alpha1_GenericResourceServiceFacilitie
},
},
},
+ "gateway": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.Gateway"),
+ },
+ },
},
},
},
Dependencies: []string{
- "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ExecServiceFacilitator", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.GenericResourceServiceFacilitator"},
+ "kmodules.xyz/offshoot-api/api/v1.Gateway", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ExecServiceFacilitator", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.GenericResourceServiceFacilitator"},
}
}
@@ -17839,6 +19834,32 @@ func schema_resource_metadata_apis_core_v1alpha1_GenericResourceSpec(ref common.
},
},
},
+ "pods": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/resource-metadata/apis/core/v1alpha1.ComputeResource"),
+ },
+ },
+ },
+ },
+ },
+ "storage": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/resource-metadata/apis/core/v1alpha1.StorageResource"),
+ },
+ },
+ },
+ },
+ },
"status": {
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
@@ -17850,7 +19871,7 @@ func schema_resource_metadata_apis_core_v1alpha1_GenericResourceSpec(ref common.
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/api/resource.Quantity", "kmodules.xyz/client-go/api/v1.ClusterMetadata", "kmodules.xyz/client-go/api/v1.ResourceID", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.GenericResourceStatus"},
+ "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/api/resource.Quantity", "kmodules.xyz/client-go/api/v1.ClusterMetadata", "kmodules.xyz/client-go/api/v1.ResourceID", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.ComputeResource", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.GenericResourceStatus", "kmodules.xyz/resource-metadata/apis/core/v1alpha1.StorageResource"},
}
}
@@ -18451,6 +20472,12 @@ func schema_resource_metadata_apis_core_v1alpha1_Service(ref common.ReferenceCal
Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ResourceQuery"),
},
},
+ "impersonate": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
},
Required: []string{"name", "ref", "query"},
},
@@ -18460,6 +20487,45 @@ func schema_resource_metadata_apis_core_v1alpha1_Service(ref common.ReferenceCal
}
}
+func schema_resource_metadata_apis_core_v1alpha1_StorageResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "uid": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "creationTimestamp": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
+ },
+ },
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.VolumeResourceRequirements"),
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.VolumeResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
+ }
+}
+
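Similarly, a sketch of the StorageResource shape implied by the schema just above (same imports as the earlier sketch; json tags follow the property names, and everything beyond the required "name" is assumed optional). The `pods` and `storage` arrays added to GenericResourceSpec in the hunk further up use these two types.

    // StorageResource mirrors the StorageResource OpenAPI definition above (sketch).
    type StorageResource struct {
    	UID               string                          `json:"uid,omitempty"`
    	Name              string                          `json:"name"`
    	CreationTimestamp metav1.Time                     `json:"creationTimestamp,omitempty"`
    	Resources         core.VolumeResourceRequirements `json:"resources"`
    }
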
func schema_kmodulesxyz_resource_metadata_apis_shared_Action(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -19143,6 +21209,14 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref common
Format: "",
},
},
+ "microsoft": {
+ SchemaProps: spec.SchemaProps{
+ Description: "mcr.microsoft.com",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"appscode": {
SchemaProps: spec.SchemaProps{
Description: "r.appscode.com",
@@ -19175,6 +21249,12 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref common
Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ResourceQuery"),
},
},
+ "impersonate": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
},
Required: []string{"ref", "query"},
},
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/zz_generated.deepcopy.go
index c6672a102..a081865a7 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/core/v1alpha1/zz_generated.deepcopy.go
@@ -23,6 +23,7 @@ package v1alpha1
import (
apiv1 "kmodules.xyz/client-go/api/v1"
+ offshootapiapiv1 "kmodules.xyz/offshoot-api/api/v1"
shared "kmodules.xyz/resource-metadata/apis/shared"
api "kmodules.xyz/resource-metrics/api"
@@ -33,6 +34,59 @@ import (
version "k8s.io/apimachinery/pkg/version"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComputeResource) DeepCopyInto(out *ComputeResource) {
+ *out = *in
+ in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
+ if in.Containers != nil {
+ in, out := &in.Containers, &out.Containers
+ *out = make([]ContainerResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.InitContainers != nil {
+ in, out := &in.InitContainers, &out.InitContainers
+ *out = make([]ContainerResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputeResource.
+func (in *ComputeResource) DeepCopy() *ComputeResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerResource) DeepCopyInto(out *ContainerResource) {
+ *out = *in
+ in.Resource.DeepCopyInto(&out.Resource)
+ if in.RestartPolicy != nil {
+ in, out := &in.RestartPolicy, &out.RestartPolicy
+ *out = new(v1.ContainerRestartPolicy)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResource.
+func (in *ContainerResource) DeepCopy() *ContainerResource {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerResource)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerView) DeepCopyInto(out *ContainerView) {
*out = *in
@@ -313,6 +367,11 @@ func (in *GenericResourceServiceFacilities) DeepCopyInto(out *GenericResourceSer
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.Gateway != nil {
+ in, out := &in.Gateway, &out.Gateway
+ *out = new(offshootapiapiv1.Gateway)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -443,6 +502,20 @@ func (in *GenericResourceSpec) DeepCopyInto(out *GenericResourceSpec) {
(*out)[key] = outVal
}
}
+ if in.Pods != nil {
+ in, out := &in.Pods, &out.Pods
+ *out = make([]ComputeResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Storage != nil {
+ in, out := &in.Storage, &out.Storage
+ *out = make([]StorageResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
out.Status = in.Status
return
}
@@ -839,3 +912,21 @@ func (in *Service) DeepCopy() *Service {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageResource) DeepCopyInto(out *StorageResource) {
+ *out = *in
+ in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
+ in.Resources.DeepCopyInto(&out.Resources)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageResource.
+func (in *StorageResource) DeepCopy() *StorageResource {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageResource)
+ in.DeepCopyInto(out)
+ return out
+}
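The deepcopy additions above give the new types the usual value-semantics helpers: slices of ContainerResource are copied element-wise and the RestartPolicy pointer is duplicated. A minimal usage sketch, with illustrative variable names only:

    // Assumption: illustrative only; shows that DeepCopy yields a fully
    // independent copy, including backing storage for the Containers slice.
    orig := &ComputeResource{
    	Name:       "mysql-0",
    	Containers: []ContainerResource{{Name: "mysql"}},
    }
    clone := orig.DeepCopy()
    clone.Containers = append(clone.Containers, ContainerResource{Name: "exporter"})
    // len(orig.Containers) is still 1; the clone's slice does not alias the original.
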
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/inboxtokenrequest_types.go b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/inboxtokenrequest_types.go
index 05e4bfb8b..b5032e083 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/inboxtokenrequest_types.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/inboxtokenrequest_types.go
@@ -45,8 +45,8 @@ type InboxTokenRequest struct {
type InboxTokenRequestRequest struct{}
type InboxTokenRequestResponse struct {
- JmapJWTToken string `json:"jmapJWTToken"`
- AdminJWTToken string `json:"adminJWTToken"`
+ AgentJWTToken string `json:"agentJwtToken"`
+ CABundle string `json:"caBundle,omitempty"`
}
func init() {
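
Reviewer note: this is a breaking wire change for InboxTokenRequestResponse. The two JWT fields are replaced by a single AgentJWTToken (serialized as `agentJwtToken`) plus an optional CABundle. A decoding sketch with a placeholder payload; the local type below mirrors the renamed struct for illustration only, the real type lives in kmodules.xyz/resource-metadata/apis/identity/v1alpha1.

    package main

    import (
    	"encoding/json"
    	"fmt"
    	"log"
    )

    // Assumption: local mirror of the renamed response, for illustration.
    type InboxTokenRequestResponse struct {
    	AgentJWTToken string `json:"agentJwtToken"`
    	CABundle      string `json:"caBundle,omitempty"`
    }

    func main() {
    	payload := []byte(`{"agentJwtToken":"<jwt>","caBundle":"<base64-pem>"}`)
    	var resp InboxTokenRequestResponse
    	if err := json.Unmarshal(payload, &resp); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(resp.AgentJWTToken, resp.CABundle)
    }
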
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/openapi_generated.go b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/openapi_generated.go
index ad48a4493..128b7bb28 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/openapi_generated.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/openapi_generated.go
@@ -317,6 +317,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"kmodules.xyz/client-go/api/v1.CAPIClusterInfo": schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref),
"kmodules.xyz/client-go/api/v1.CertificatePrivateKey": schema_kmodulesxyz_client_go_api_v1_CertificatePrivateKey(ref),
"kmodules.xyz/client-go/api/v1.CertificateSpec": schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterClaimFeatures": schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterClaimInfo": schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref),
+ "kmodules.xyz/client-go/api/v1.ClusterInfo": schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref),
"kmodules.xyz/client-go/api/v1.ClusterMetadata": schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref),
"kmodules.xyz/client-go/api/v1.Condition": schema_kmodulesxyz_client_go_api_v1_Condition(ref),
"kmodules.xyz/client-go/api/v1.HealthCheckSpec": schema_kmodulesxyz_client_go_api_v1_HealthCheckSpec(ref),
@@ -330,9 +333,31 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"kmodules.xyz/client-go/api/v1.ResourceID": schema_kmodulesxyz_client_go_api_v1_ResourceID(ref),
"kmodules.xyz/client-go/api/v1.TLSConfig": schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref),
"kmodules.xyz/client-go/api/v1.TimeOfDay": schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref),
+ "kmodules.xyz/client-go/api/v1.TypeReference": schema_kmodulesxyz_client_go_api_v1_TypeReference(ref),
"kmodules.xyz/client-go/api/v1.TypedObjectReference": schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref),
"kmodules.xyz/client-go/api/v1.X509Subject": schema_kmodulesxyz_client_go_api_v1_X509Subject(ref),
"kmodules.xyz/client-go/api/v1.stringSetMerger": schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref),
+ "kmodules.xyz/offshoot-api/api/v1.Gateway": schema_kmodulesxyz_offshoot_api_api_v1_Gateway(ref),
+ "kmodules.xyz/offshoot-api/api/v1.GatewayPort": schema_kmodulesxyz_offshoot_api_api_v1_GatewayPort(ref),
+ "kmodules.xyz/offshoot-api/api/v1.IONiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus": schema_kmodulesxyz_offshoot_api_api_v1_NamedServiceStatus(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NamedURL": schema_kmodulesxyz_offshoot_api_api_v1_NamedURL(ref),
+ "kmodules.xyz/offshoot-api/api/v1.NiceSettings": schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta": schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaim": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate": schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.PodTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.RuntimeSettings": schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServicePort": schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServiceSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.ServiceTemplateSpec": schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref),
+ "kmodules.xyz/offshoot-api/api/v1.Volume": schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref),
+ "kmodules.xyz/offshoot-api/api/v1.VolumeSource": schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref),
"kmodules.xyz/resource-metadata/apis/identity/v1alpha1.ClusterIdentity": schema_resource_metadata_apis_identity_v1alpha1_ClusterIdentity(ref),
"kmodules.xyz/resource-metadata/apis/identity/v1alpha1.ClusterIdentityList": schema_resource_metadata_apis_identity_v1alpha1_ClusterIdentityList(ref),
"kmodules.xyz/resource-metadata/apis/identity/v1alpha1.ControlPlaneInfo": schema_resource_metadata_apis_identity_v1alpha1_ControlPlaneInfo(ref),
@@ -16118,23 +16143,27 @@ func schema_kmodulesxyz_client_go_api_v1_CAPIClusterInfo(ref common.ReferenceCal
Properties: map[string]spec.Schema{
"provider": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
"clusterName": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
},
},
},
+ Required: []string{"provider", "namespace", "clusterName"},
},
},
}
@@ -16201,7 +16230,7 @@ func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCal
},
"renewBefore": {
SchemaProps: spec.SchemaProps{
- Description: "Certificate renew before expiration duration",
+ Description: "Certificate renew before expiration duration\n\nDeprecated use `ReconfigureTLS` type OpsRequest instead.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"),
},
},
@@ -16280,6 +16309,130 @@ func schema_kmodulesxyz_client_go_api_v1_CertificateSpec(ref common.ReferenceCal
}
}
+func schema_kmodulesxyz_client_go_api_v1_ClusterClaimFeatures(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "enabledFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "externallyManagedFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "disabledFeatures": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ClusterClaimInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "clusterMetadata": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.ClusterInfo"),
+ },
+ },
+ },
+ Required: []string{"clusterMetadata"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.ClusterInfo"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_ClusterInfo(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ClusterInfo used in ace-installer",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "uid": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "clusterManagers": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "capi": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/client-go/api/v1.CAPIClusterInfo"),
+ },
+ },
+ },
+ Required: []string{"uid", "name", "clusterManagers"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/client-go/api/v1.CAPIClusterInfo"},
+ }
+}
+
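For orientation, a sketch of the shapes implied by the three new client-go definitions above (ClusterClaimFeatures, ClusterClaimInfo, ClusterInfo). Json tags follow the schema property names and required fields follow the Required lists; anything else is an assumption, and the authoritative types live in kmodules.xyz/client-go/api/v1.

    type ClusterClaimFeatures struct {
    	EnabledFeatures           []string `json:"enabledFeatures,omitempty"`
    	ExternallyManagedFeatures []string `json:"externallyManagedFeatures,omitempty"`
    	DisabledFeatures          []string `json:"disabledFeatures,omitempty"`
    }

    // ClusterInfo is described above as "used in ace-installer".
    type ClusterInfo struct {
    	UID             string           `json:"uid"`
    	Name            string           `json:"name"`
    	ClusterManagers []string         `json:"clusterManagers"`
    	CAPI            *CAPIClusterInfo `json:"capi,omitempty"`
    }

    type ClusterClaimInfo struct {
    	ClusterMetadata ClusterInfo `json:"clusterMetadata"`
    }
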
func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -16335,6 +16488,18 @@ func schema_kmodulesxyz_client_go_api_v1_ClusterMetadata(ref common.ReferenceCal
Format: "",
},
},
+ "managerID": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hubClusterID": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
},
Required: []string{"uid"},
},
@@ -16726,243 +16891,1972 @@ func schema_kmodulesxyz_client_go_api_v1_ResourceID(ref common.ReferenceCallback
},
"kind": {
SchemaProps: spec.SchemaProps{
- Description: "Kind is the serialized kind of the resource. It is normally CamelCase and singular.",
- Type: []string{"string"},
- Format: "",
+ Description: "Kind is the serialized kind of the resource. It is normally CamelCase and singular.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "scope": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"group"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "issuerRef": {
+ SchemaProps: spec.SchemaProps{
+ Description: "IssuerRef is a reference to a Certificate Issuer.",
+ Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"),
+ },
+ },
+ "certificates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Certificate provides server and/or client certificate options used by application pods. These options are passed to a cert-manager Certificate object. xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/client-go/api/v1.CertificateSpec"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.TypedLocalObjectReference", "kmodules.xyz/client-go/api/v1.CertificateSpec"},
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TimeOfDay is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
+ Type: apiv1.TimeOfDay{}.OpenAPISchemaType(),
+ Format: apiv1.TimeOfDay{}.OpenAPISchemaFormat(),
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TypeReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TypeReference represents an object type.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "apiGroup": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "TypedObjectReference represents a typed namespaced object.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "apiGroup": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "X509Subject Full X509 name specification",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "organizations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizations to be used on the Certificate.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "countries": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Countries to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "organizationalUnits": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Organizational Units to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "localities": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Cities to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "provinces": {
+ SchemaProps: spec.SchemaProps{
+ Description: "State/Provinces to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "streetAddresses": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Street addresses to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "postalCodes": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Postal codes to be used on the CertificateSpec.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serialNumber": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Serial number to be used on the CertificateSpec.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ContainerRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compute Resources required by container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "livenessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "readinessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "lifecycle": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ Ref: ref("k8s.io/api/core/v1.Lifecycle"),
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.SecurityContext"),
+ },
+ },
+ "nice": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Settings to configure `nice` to throttle the load on cpu. More info: http://kennystechtalk.blogspot.com/2015/04/throttling-cpu-usage-with-linux-cgroups.html More info: https://oakbytes.wordpress.com/2012/06/06/linux-scheduler-cfs-and-nice/",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NiceSettings"),
+ },
+ },
+ "ionice": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Settings to configure `ionice` to throttle the load on disk. More info: http://kennystechtalk.blogspot.com/2015/04/throttling-cpu-usage-with-linux-cgroups.html More info: https://oakbytes.wordpress.com/2012/06/06/linux-scheduler-cfs-and-nice/",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.IONiceSettings"),
+ },
+ },
+ "envFrom": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvFromSource"),
+ },
+ },
+ },
+ },
+ },
+ "env": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "kmodules.xyz/offshoot-api/api/v1.IONiceSettings", "kmodules.xyz/offshoot-api/api/v1.NiceSettings"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_EphemeralVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Represents an ephemeral volume that is handled by a normal storage driver.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "volumeClaimTemplate": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.PersistentVolumeClaimTemplate"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_Gateway(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ip": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostname": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "services": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Services is an optional configuration for services used to expose database",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus"),
+ },
+ },
+ },
+ },
+ },
+ "ui": {
+ SchemaProps: spec.SchemaProps{
+ Description: "UI is an optional list of database web uis",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.NamedURL"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"name", "namespace"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.NamedServiceStatus", "kmodules.xyz/offshoot-api/api/v1.NamedURL"},
+ }
+}
+
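The Gateway definition above is what the new `gateway` field on GenericResourceServiceFacilities (earlier hunk) refers to. A sketch of the implied shape, with optionality assumed outside the required name/namespace pair; the real type is defined in kmodules.xyz/offshoot-api/api/v1.

    type Gateway struct {
    	Name      string `json:"name"`
    	Namespace string `json:"namespace"`
    	IP        string `json:"ip,omitempty"`
    	Hostname  string `json:"hostname,omitempty"`
    	// Services is an optional configuration for services used to expose database
    	Services []NamedServiceStatus `json:"services,omitempty"`
    	// UI is an optional list of database web uis
    	UI []NamedURL `json:"ui,omitempty"`
    }
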
+func schema_kmodulesxyz_offshoot_api_api_v1_GatewayPort(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "GatewayPort contains information on Gateway service's port.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The name of this port within the gateway service.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port that will be exposed by the gateway service.",
+ Default: 0,
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "backendServicePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Number of the port to access the backend service.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "nodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port on each node on which this gateway service is exposed when type is NodePort or LoadBalancer.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ Required: []string{"port"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_IONiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "https://linux.die.net/man/1/ionice",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "class": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "classData": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NamedServiceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "alias": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Alias represents the identifier of the service.",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "ports": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.GatewayPort"),
+ },
+ },
+ },
+ },
+ },
+ },
+ Required: []string{"alias", "ports"},
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.GatewayPort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NamedURL(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "alias": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Alias represents the identifier of the service. This should match the db ui chart name",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "url": {
+ SchemaProps: spec.SchemaProps{
+ Description: "URL of the database ui",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.GatewayPort"),
+ },
+ },
+ "helmRelease": {
+ SchemaProps: spec.SchemaProps{
+ Description: "HelmRelease is the name of the helm release used to deploy this ui The name format is typically -",
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ Required: []string{"alias", "url", "port"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.LocalObjectReference", "kmodules.xyz/offshoot-api/api/v1.GatewayPort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_NiceSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "https://linux.die.net/man/1/nice",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "adjustment": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PartialObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "generateName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "namespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "labels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "annotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "ownerReferences": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "uid",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaim(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "kind": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "apiVersion": {
+ SchemaProps: spec.SchemaProps{
+ Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
+ },
+ },
+ "status": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimStatus"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.PersistentVolumeClaimStatus", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PersistentVolumeClaimTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"),
+ },
+ },
+ },
+ Required: []string{"spec"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "kmodules.xyz/offshoot-api/api/v1.PartialObjectMeta"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodRuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "podLabels": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodLabels are the labels that will be attached with the respective Pod",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "podAnnotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodAnnotations are the annotations that will be attached with the respective Pod",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "nodeSelector": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "serviceAccountAnnotations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountAnnotations are the annotations that will be attached with the respective ServiceAccount",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "automountServiceAccountToken": {
+ SchemaProps: spec.SchemaProps{
+ Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "nodeName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "imagePullSecrets": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodRuntimeSettings. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's scheduling constraints",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's tolerations.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "readinessGates": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.PodReadinessGate"),
+ },
+ },
+ },
+ },
+ },
+ "runtimeClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enableServiceLinks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "topologySpreadConstraints": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "topologyKey",
+ "whenUnsatisfiable",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "topologyKey",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "volumes": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge,retainKeys",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.Volume"),
+ },
+ },
+ },
+ },
+ },
+ "initContainers": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Container"),
+ },
+ },
+ },
+ },
+ },
+ "terminationGracePeriodSeconds": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+ Type: []string{"integer"},
+ Format: "int64",
+ },
+ },
+ "dnsPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.\n\nPossible enum values:\n - `\"ClusterFirst\"` indicates that the pod should use cluster DNS first unless hostNetwork is true, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"ClusterFirstWithHostNet\"` indicates that the pod should use cluster DNS first, if it is available, then fall back on the default (as determined by kubelet) DNS settings.\n - `\"Default\"` indicates that the pod should use the default (as determined by kubelet) DNS settings.\n - `\"None\"` indicates that the pod should use empty DNS settings. DNS parameters such as nameservers and search paths should be defined via DNSConfig.",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"ClusterFirst", "ClusterFirstWithHostNet", "Default", "None"},
+ },
+ },
+ "nodeSelector": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-map-type": "atomic",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+ Type: []string{"object"},
+ AdditionalProperties: &spec.SchemaOrBool{
+ Allows: true,
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "serviceAccountName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostNetwork": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "hostPID": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use the host's pid namespace. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "hostIPC": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Use the host's ipc namespace. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "shareProcessNamespace": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "securityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
+ Ref: ref("k8s.io/api/core/v1.PodSecurityContext"),
+ },
+ },
+ "imagePullSecrets": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "name",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ "affinity": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's scheduling constraints",
+ Ref: ref("k8s.io/api/core/v1.Affinity"),
+ },
+ },
+ "schedulerName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "tolerations": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, the pod's tolerations.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.Toleration"),
+ },
+ },
+ },
+ },
+ },
+ "priorityClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "priority": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "dnsConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+ Ref: ref("k8s.io/api/core/v1.PodDNSConfig"),
+ },
+ },
+ "runtimeClassName": {
+ SchemaProps: spec.SchemaProps{
+ Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "enableServiceLinks": {
+ SchemaProps: spec.SchemaProps{
+ Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
+ "topologySpreadConstraints": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-list-map-keys": []interface{}{
+ "topologyKey",
+ "whenUnsatisfiable",
+ },
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "topologyKey",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"),
+ },
+ },
+ },
+ },
+ },
+ "args": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "env": {
+ SchemaProps: spec.SchemaProps{
+ Description: "List of environment variables to set in the container. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.EnvVar"),
+ },
+ },
+ },
+ },
+ },
+ "resources": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Compute Resources required by the sidecar container.",
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.ResourceRequirements"),
+ },
+ },
+ "livenessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Controllers may set default LivenessProbe if no liveness probe is provided. To ignore defaulting, set the value to empty LivenessProbe \"{}\". Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "readinessProbe": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. Controllers may set default ReadinessProbe if no readyness probe is provided. To ignore defaulting, set the value to empty ReadynessProbe \"{}\". More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+ Ref: ref("k8s.io/api/core/v1.Probe"),
+ },
+ },
+ "lifecycle": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+ Ref: ref("k8s.io/api/core/v1.Lifecycle"),
+ },
+ },
+ "containerSecurityContext": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ Ref: ref("k8s.io/api/core/v1.SecurityContext"),
+ },
+ },
+ "volumeMounts": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "mountPath",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("k8s.io/api/core/v1.VolumeMount"),
+ },
+ },
+ },
+ },
+ },
+ "podPlacementPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "PodPlacementPolicy is the reference of the podPlacementPolicy",
+ Ref: ref("k8s.io/api/core/v1.LocalObjectReference"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.VolumeMount", "kmodules.xyz/offshoot-api/api/v1.Volume"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_PodTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "PodTemplateSpec describes the data a pod should have when created from a template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "controller": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Workload controller's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PodSpec"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta", "kmodules.xyz/offshoot-api/api/v1.PodSpec"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_RuntimeSettings(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "pod": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings"),
+ },
+ },
+ "container": {
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ContainerRuntimeSettings", "kmodules.xyz/offshoot-api/api/v1.PodRuntimeSettings"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServicePort(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServicePort contains information on service's port.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "port": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port that will be exposed by this service.",
+ Default: 0,
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "nodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ },
+ Required: []string{"port"},
+ },
+ },
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceSpec describes the attributes that a user creates on a service.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "ports": {
+ VendorExtensible: spec.VendorExtensible{
+ Extensions: spec.Extensions{
+ "x-kubernetes-patch-merge-key": "port",
+ "x-kubernetes-patch-strategy": "merge",
+ },
+ },
+ SchemaProps: spec.SchemaProps{
+ Description: "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ServicePort"),
+ },
+ },
+ },
+ },
+ },
+ "clusterIP": {
+ SchemaProps: spec.SchemaProps{
+ Description: "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "type": {
+ SchemaProps: spec.SchemaProps{
+ Description: "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types\n\nPossible enum values:\n - `\"ClusterIP\"` means a service will only be accessible inside the cluster, via the cluster IP.\n - `\"ExternalName\"` means a service consists of only a reference to an external name that kubedns or equivalent will return as a CNAME record, with no exposing or proxying of any pods involved.\n - `\"LoadBalancer\"` means a service will be exposed via an external load balancer (if the cloud provider supports it), in addition to 'NodePort' type.\n - `\"NodePort\"` means a service will be exposed on one port of every node, in addition to 'ClusterIP' type.",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"ClusterIP", "ExternalName", "LoadBalancer", "NodePort"},
+ },
+ },
+ "externalIPs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "loadBalancerIP": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "loadBalancerSourceRanges": {
+ SchemaProps: spec.SchemaProps{
+ Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ },
+ },
+ },
+ "externalTrafficPolicy": {
+ SchemaProps: spec.SchemaProps{
+ Description: "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.\n\nPossible enum values:\n - `\"Cluster\"`\n - `\"Cluster\"` routes traffic to all endpoints.\n - `\"Local\"`\n - `\"Local\"` preserves the source IP of the traffic by routing only to endpoints on the same node as the traffic was received on (dropping the traffic if there are no local endpoints).",
+ Type: []string{"string"},
+ Format: "",
+ Enum: []interface{}{"Cluster", "Cluster", "Local", "Local"},
+ },
+ },
+ "healthCheckNodePort": {
+ SchemaProps: spec.SchemaProps{
+ Description: "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.",
+ Type: []string{"integer"},
+ Format: "int32",
+ },
+ },
+ "sessionAffinityConfig": {
+ SchemaProps: spec.SchemaProps{
+ Description: "sessionAffinityConfig contains the configurations of session affinity.",
+ Ref: ref("k8s.io/api/core/v1.SessionAffinityConfig"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.SessionAffinityConfig", "kmodules.xyz/offshoot-api/api/v1.ServicePort"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_ServiceTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "ServiceTemplateSpec describes the data a service should have when created from a template",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "metadata": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ObjectMeta"),
+ },
+ },
+ "spec": {
+ SchemaProps: spec.SchemaProps{
+ Description: "Specification of the desired behavior of the service. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ Default: map[string]interface{}{},
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.ServiceSpec"),
+ },
+ },
+ },
+ },
+ },
+ Dependencies: []string{
+ "kmodules.xyz/offshoot-api/api/v1.ObjectMeta", "kmodules.xyz/offshoot-api/api/v1.ServiceSpec"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "name": {
+ SchemaProps: spec.SchemaProps{
+ Description: "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
+ "hostPath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
+ },
+ },
+ "emptyDir": {
+ SchemaProps: spec.SchemaProps{
+ Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
+ },
+ },
+ "gcePersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
+ },
+ },
+ "awsElasticBlockStore": {
+ SchemaProps: spec.SchemaProps{
+ Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
+ },
+ },
+ "secret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
+ },
+ },
+ "nfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
+ },
+ },
+ "iscsi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+ Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
+ },
+ },
+ "glusterfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
+ },
+ },
+ "persistentVolumeClaim": {
+ SchemaProps: spec.SchemaProps{
+ Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
+ },
+ },
+ "rbd": {
+ SchemaProps: spec.SchemaProps{
+ Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
+ },
+ },
+ "flexVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
+ },
+ },
+ "cinder": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
+ },
+ },
+ "cephfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
+ },
+ },
+ "flocker": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
+ },
+ },
+ "downwardAPI": {
+ SchemaProps: spec.SchemaProps{
+ Description: "downwardAPI represents downward API about the pod that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
+ },
+ },
+ "fc": {
+ SchemaProps: spec.SchemaProps{
+ Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
+ },
+ },
+ "azureFile": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
+ },
+ },
+ "configMap": {
+ SchemaProps: spec.SchemaProps{
+ Description: "configMap represents a configMap that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
+ },
+ },
+ "vsphereVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
+ },
+ },
+ "quobyte": {
+ SchemaProps: spec.SchemaProps{
+ Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
+ },
+ },
+ "azureDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
+ },
+ },
+ "photonPersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
+ },
+ },
+ "projected": {
+ SchemaProps: spec.SchemaProps{
+ Description: "projected items for all in one resources secrets, configmaps, and downward API",
+ Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
+ },
+ },
+ "portworxVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
+ },
+ },
+ "scaleIO": {
+ SchemaProps: spec.SchemaProps{
+ Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
+ },
+ },
+ "storageos": {
+ SchemaProps: spec.SchemaProps{
+ Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
+ },
+ },
+ "csi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
+ Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
+ },
+ },
+ "ephemeral": {
+ SchemaProps: spec.SchemaProps{
+ Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"),
+ },
+ },
+ },
+ Required: []string{"name"},
+ },
+ },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"},
+ }
+}
+
+func schema_kmodulesxyz_offshoot_api_api_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+ return common.OpenAPIDefinition{
+ Schema: spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Description: "Represents the source of a volume to mount. Only one of its members may be specified.",
+ Type: []string{"object"},
+ Properties: map[string]spec.Schema{
+ "hostPath": {
+ SchemaProps: spec.SchemaProps{
+ Description: "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+ Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"),
+ },
+ },
+ "emptyDir": {
+ SchemaProps: spec.SchemaProps{
+ Description: "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+ Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"),
+ },
+ },
+ "gcePersistentDisk": {
+ SchemaProps: spec.SchemaProps{
+ Description: "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+ Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"),
+ },
+ },
+ "awsElasticBlockStore": {
+ SchemaProps: spec.SchemaProps{
+ Description: "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+ Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"),
+ },
+ },
+ "secret": {
+ SchemaProps: spec.SchemaProps{
+ Description: "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+ Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"),
+ },
+ },
+ "nfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+ Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"),
+ },
+ },
+ "iscsi": {
+ SchemaProps: spec.SchemaProps{
+ Description: "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+ Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"),
+ },
+ },
+ "glusterfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+ Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"),
+ },
+ },
+ "persistentVolumeClaim": {
+ SchemaProps: spec.SchemaProps{
+ Description: "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+ Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"),
+ },
+ },
+ "rbd": {
+ SchemaProps: spec.SchemaProps{
+ Description: "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md",
+ Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"),
+ },
+ },
+ "flexVolume": {
+ SchemaProps: spec.SchemaProps{
+ Description: "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+ Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"),
+ },
+ },
+ "cinder": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+ Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"),
+ },
+ },
+ "cephfs": {
+ SchemaProps: spec.SchemaProps{
+ Description: "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"),
},
},
- "scope": {
+ "flocker": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
+ Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"),
},
},
- },
- Required: []string{"group"},
- },
- },
- }
-}
-
-func schema_kmodulesxyz_client_go_api_v1_TLSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "issuerRef": {
+ "downwardAPI": {
SchemaProps: spec.SchemaProps{
- Description: "IssuerRef is a reference to a Certificate Issuer.",
- Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"),
+ Description: "downwardAPI represents downward API about the pod that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"),
},
},
- "certificates": {
+ "fc": {
SchemaProps: spec.SchemaProps{
- Description: "Certificate provides server and/or client certificate options used by application pods. These options are passed to a cert-manager Certificate object. xref: https://github.com/jetstack/cert-manager/blob/v0.16.0/pkg/apis/certmanager/v1beta1/types_certificate.go#L82-L162",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: map[string]interface{}{},
- Ref: ref("kmodules.xyz/client-go/api/v1.CertificateSpec"),
- },
- },
- },
+ Description: "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+ Ref: ref("k8s.io/api/core/v1.FCVolumeSource"),
},
},
- },
- },
- },
- Dependencies: []string{
- "k8s.io/api/core/v1.TypedLocalObjectReference", "kmodules.xyz/client-go/api/v1.CertificateSpec"},
- }
-}
-
-func schema_kmodulesxyz_client_go_api_v1_TimeOfDay(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "TimeOfDay is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
- Type: apiv1.TimeOfDay{}.OpenAPISchemaType(),
- Format: apiv1.TimeOfDay{}.OpenAPISchemaFormat(),
- },
- },
- }
-}
-
-func schema_kmodulesxyz_client_go_api_v1_TypedObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "TypedObjectReference represents an typed namespaced object.",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "apiGroup": {
+ "azureFile": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "azureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"),
},
},
- "kind": {
+ "configMap": {
SchemaProps: spec.SchemaProps{
- Type: []string{"string"},
- Format: "",
+ Description: "configMap represents a configMap that should populate this volume",
+ Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"),
},
},
- "namespace": {
+ "vsphereVolume": {
SchemaProps: spec.SchemaProps{
- Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
- Type: []string{"string"},
- Format: "",
+ Description: "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"),
},
},
- "name": {
+ "quobyte": {
SchemaProps: spec.SchemaProps{
- Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
- Default: "",
- Type: []string{"string"},
- Format: "",
+ Description: "quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
+ Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"),
},
},
- },
- Required: []string{"name"},
- },
- },
- }
-}
-
-func schema_kmodulesxyz_client_go_api_v1_X509Subject(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Description: "X509Subject Full X509 name specification",
- Type: []string{"object"},
- Properties: map[string]spec.Schema{
- "organizations": {
+ "azureDisk": {
SchemaProps: spec.SchemaProps{
- Description: "Organizations to be used on the Certificate.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+ Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"),
},
},
- "countries": {
+ "photonPersistentDisk": {
SchemaProps: spec.SchemaProps{
- Description: "Countries to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"),
},
},
- "organizationalUnits": {
+ "projected": {
SchemaProps: spec.SchemaProps{
- Description: "Organizational Units to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "projected items for all in one resources secrets, configmaps, and downward API",
+ Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"),
},
},
- "localities": {
+ "portworxVolume": {
SchemaProps: spec.SchemaProps{
- Description: "Cities to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "portworxVolume represents a portworx volume attached and mounted on kubelets host machine",
+ Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"),
},
},
- "provinces": {
+ "scaleIO": {
SchemaProps: spec.SchemaProps{
- Description: "State/Provinces to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"),
},
},
- "streetAddresses": {
+ "storageos": {
SchemaProps: spec.SchemaProps{
- Description: "Street addresses to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
+ Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"),
},
},
- "postalCodes": {
+ "csi": {
SchemaProps: spec.SchemaProps{
- Description: "Postal codes to be used on the CertificateSpec.",
- Type: []string{"array"},
- Items: &spec.SchemaOrArray{
- Schema: &spec.Schema{
- SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
- },
- },
- },
+ Description: "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
+ Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"),
},
},
- "serialNumber": {
+ "ephemeral": {
SchemaProps: spec.SchemaProps{
- Description: "Serial number to be used on the CertificateSpec.",
- Type: []string{"string"},
- Format: "",
+ Description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
+ Ref: ref("kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"),
},
},
},
},
},
- }
-}
-
-func schema_kmodulesxyz_client_go_api_v1_stringSetMerger(ref common.ReferenceCallback) common.OpenAPIDefinition {
- return common.OpenAPIDefinition{
- Schema: spec.Schema{
- SchemaProps: spec.SchemaProps{
- Type: []string{"object"},
- },
- },
+ Dependencies: []string{
+ "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "kmodules.xyz/offshoot-api/api/v1.EphemeralVolumeSource"},
}
}
@@ -17191,22 +19085,21 @@ func schema_resource_metadata_apis_identity_v1alpha1_InboxTokenRequestResponse(r
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
- "jmapJWTToken": {
+ "agentJwtToken": {
SchemaProps: spec.SchemaProps{
Default: "",
Type: []string{"string"},
Format: "",
},
},
- "adminJWTToken": {
+ "caBundle": {
SchemaProps: spec.SchemaProps{
- Default: "",
- Type: []string{"string"},
- Format: "",
+ Type: []string{"string"},
+ Format: "",
},
},
},
- Required: []string{"jmapJWTToken", "adminJWTToken"},
+ Required: []string{"agentJwtToken"},
},
},
}
@@ -17658,6 +19551,13 @@ func schema_resource_metadata_apis_identity_v1alpha1_SubjectAccessNamespaceRevie
SchemaProps: spec.SchemaProps{
Type: []string{"object"},
Properties: map[string]spec.Schema{
+ "allNamespaces": {
+ SchemaProps: spec.SchemaProps{
+ Default: false,
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
"namespaces": {
SchemaProps: spec.SchemaProps{
Type: []string{"array"},
@@ -17695,6 +19595,7 @@ func schema_resource_metadata_apis_identity_v1alpha1_SubjectAccessNamespaceRevie
},
},
},
+ Required: []string{"allNamespaces"},
},
},
}
@@ -18449,6 +20350,14 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref common
Format: "",
},
},
+ "microsoft": {
+ SchemaProps: spec.SchemaProps{
+ Description: "mcr.microsoft.com",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"appscode": {
SchemaProps: spec.SchemaProps{
Description: "r.appscode.com",
@@ -18481,6 +20390,12 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref common
Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ResourceQuery"),
},
},
+ "impersonate": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
},
Required: []string{"ref", "query"},
},
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/selfsubjectnamespaceaccessreview.go b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/selfsubjectnamespaceaccessreview.go
index 71925b9e7..bd5ad929d 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/selfsubjectnamespaceaccessreview.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/identity/v1alpha1/selfsubjectnamespaceaccessreview.go
@@ -64,8 +64,9 @@ type SelfSubjectNamespaceAccessReviewSpec struct {
}
type SubjectAccessNamespaceReviewStatus struct {
- Namespaces []string `json:"namespaces,omitempty"`
- Projects map[string][]string `json:"projects,omitempty"`
+ AllNamespaces bool `json:"allNamespaces"`
+ Namespaces []string `json:"namespaces,omitempty"`
+ Projects map[string][]string `json:"projects,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go
index bc475e4df..c0a501fe6 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go
@@ -50,6 +50,9 @@ type RegistryProxies struct {
// registry.k8s.io
//+optional
Kubernetes string `json:"kubernetes"`
+ // mcr.microsoft.com
+ //+optional
+ Microsoft string `json:"microsoft"`
// r.appscode.com
//+optional
AppsCode string `json:"appscode"`
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go
index f82448e4d..7ca8bc9b6 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go
@@ -737,6 +737,14 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref common
Format: "",
},
},
+ "microsoft": {
+ SchemaProps: spec.SchemaProps{
+ Description: "mcr.microsoft.com",
+ Default: "",
+ Type: []string{"string"},
+ Format: "",
+ },
+ },
"appscode": {
SchemaProps: spec.SchemaProps{
Description: "r.appscode.com",
@@ -769,6 +777,12 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref common
Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ResourceQuery"),
},
},
+ "impersonate": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"boolean"},
+ Format: "",
+ },
+ },
},
Required: []string{"ref", "query"},
},
diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/types.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/types.go
index 930c6bf4b..2d91b1370 100644
--- a/vendor/kmodules.xyz/resource-metadata/apis/shared/types.go
+++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/types.go
@@ -38,8 +38,9 @@ type DeploymentParameters struct {
}
type ResourceLocator struct {
- Ref metav1.GroupKind `json:"ref"`
- Query ResourceQuery `json:"query"`
+ Ref metav1.GroupKind `json:"ref"`
+ Query ResourceQuery `json:"query"`
+ Impersonate bool `json:"impersonate,omitempty"`
}
// +kubebuilder:validation:Enum=REST;GraphQL
diff --git a/vendor/kmodules.xyz/resource-metadata/pkg/identity/b3.go b/vendor/kmodules.xyz/resource-metadata/pkg/identity/b3.go
index 06472293c..b58da638f 100644
--- a/vendor/kmodules.xyz/resource-metadata/pkg/identity/b3.go
+++ b/vendor/kmodules.xyz/resource-metadata/pkg/identity/b3.go
@@ -19,6 +19,8 @@ package identity
import (
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
+ "errors"
"io"
"net"
"net/http"
@@ -35,6 +37,8 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/klog/v2"
+ "moul.io/http2curl/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -97,8 +101,20 @@ func (c *Client) Identify(clusterUID string) (*kmapi.ClusterMetadata, error) {
if c.token != "" {
req.Header.Add("Authorization", "Bearer "+c.token)
}
+ if klog.V(8).Enabled() {
+ command, _ := http2curl.GetCurlCommand(req)
+ klog.V(8).Infoln(command.String())
+ }
+
resp, err := c.client.Do(req)
if err != nil {
+ var ce *tls.CertificateVerificationError
+ if errors.As(err, &ce) {
+ klog.ErrorS(err, "UnverifiedCertificates")
+ for _, cert := range ce.UnverifiedCertificates {
+ klog.Errorln(string(encodeCertPEM(cert)))
+ }
+ }
return nil, err
}
defer resp.Body.Close()
@@ -131,38 +147,57 @@ func (c *Client) Identify(clusterUID string) (*kmapi.ClusterMetadata, error) {
return &md, nil
}
-func (c *Client) GetToken() (string, error) {
+func (c *Client) GetToken() (*identityapi.InboxTokenRequestResponse, error) {
u, err := info.APIServerAddress(c.baseURL)
if err != nil {
- return "", err
+ return nil, err
}
id, err := c.GetIdentity()
if err != nil {
- return "", err
+ return nil, err
}
u.Path = path.Join(u.Path, "api/v1/agent", id.Status.Name, id.Status.UID, "token")
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
- return "", err
+ return nil, err
}
req.Header.Set("Content-Type", "application/json")
// add authorization header to the req
if c.token != "" {
req.Header.Add("Authorization", "Bearer "+c.token)
}
+ if klog.V(8).Enabled() {
+ command, _ := http2curl.GetCurlCommand(req)
+ klog.V(8).Infoln(command.String())
+ }
+
resp, err := c.client.Do(req)
if err != nil {
- return "", err
+ var ce *tls.CertificateVerificationError
+ if errors.As(err, &ce) {
+ klog.ErrorS(err, "UnverifiedCertificates")
+ for _, cert := range ce.UnverifiedCertificates {
+ klog.Errorln(string(encodeCertPEM(cert)))
+ }
+ }
+ return nil, err
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
- return "", err
+ return nil, err
+ }
+
+ tokenResponse := &identityapi.InboxTokenRequestResponse{}
+ if err = json.Unmarshal(body, tokenResponse); err != nil {
+ return nil, err
}
- return string(body), nil
+ tokenResponse.CABundle = string(c.caCert)
+
+ return tokenResponse, nil
}
const SelfName = "self"
@@ -181,3 +216,11 @@ func (c *Client) GetIdentity() (*identityapi.ClusterIdentity, error) {
Status: *md,
}, nil
}
+
+func encodeCertPEM(cert *x509.Certificate) []byte {
+ block := pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: cert.Raw,
+ }
+ return pem.EncodeToMemory(&block)
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/api/constants.go b/vendor/kmodules.xyz/resource-metrics/api/constants.go
index 316ea1639..4668a2843 100644
--- a/vendor/kmodules.xyz/resource-metrics/api/constants.go
+++ b/vendor/kmodules.xyz/resource-metrics/api/constants.go
@@ -16,7 +16,11 @@ limitations under the License.
package api
-import core "k8s.io/api/core/v1"
+import (
+ "errors"
+
+ core "k8s.io/api/core/v1"
+)
type PodRole string
@@ -56,6 +60,8 @@ const (
PodRoleRouters PodRole = "routers"
)
+var ErrMissingRefObject = errors.New("referenced object not found")
+
type ReplicaList map[PodRole]int64
type PodInfo struct {
diff --git a/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/mssqlserver.go b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/mssqlserver.go
index 6dd2b24e3..9f6bf271f 100644
--- a/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/mssqlserver.go
+++ b/vendor/kmodules.xyz/resource-metrics/kubedb.com/v1alpha2/mssqlserver.go
@@ -41,6 +41,7 @@ func (r MSSQLServer) ResourceCalculator() api.ResourceCalculator {
AppRoles: []api.PodRole{api.PodRoleDefault},
RuntimeRoles: []api.PodRole{api.PodRoleDefault, api.PodRoleExporter},
RoleReplicasFn: r.roleReplicasFn,
+ ModeFn: r.modeFn,
UsesTLSFn: r.usesTLSFn,
RoleResourceLimitsFn: r.roleResourceFn(api.ResourceLimits),
RoleResourceRequestsFn: r.roleResourceFn(api.ResourceRequests),
@@ -58,6 +59,17 @@ func (r MSSQLServer) roleReplicasFn(obj map[string]interface{}) (api.ReplicaList
return api.ReplicaList{api.PodRoleDefault: replicas}, nil
}
+func (r MSSQLServer) modeFn(obj map[string]interface{}) (string, error) {
+ mode, found, err := unstructured.NestedString(obj, "spec", "topology", "mode")
+ if err != nil {
+ return "", err
+ }
+ if found {
+ return mode, nil
+ }
+ return DBModeStandalone, nil
+}
+
func (r MSSQLServer) usesTLSFn(obj map[string]interface{}) (bool, error) {
enabled, found, err := unstructured.NestedBool(obj, "spec", "tls", "clientTLS")
if err != nil {
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/druid_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/druid_mapping.go
new file mode 100644
index 000000000..6f108ec78
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/druid_mapping.go
@@ -0,0 +1,68 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&DruidOpsRequest{})
+}
+
+type DruidOpsRequest struct{}
+
+var _ OpsPathMapper = (*DruidOpsRequest)(nil)
+
+func (m *DruidOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.topology.coordinators": "spec.topology.coordinators.replicas",
+ "spec.horizontalScaling.topology.overloads": "spec.topology.overloads.replicas",
+ "spec.horizontalScaling.topology.middleManagers": "spec.topology.middleManagers.replicas",
+ "spec.horizontalScaling.topology.historicals": "spec.topology.historicals.replicas",
+ "spec.horizontalScaling.topology.brokers": "spec.topology.brokers.replicas",
+ "spec.horizontalScaling.topology.routers": "spec.topology.routers.replicas",
+ }
+}
+
+func (m *DruidOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.topology.coordinators": "spec.topology.coordinators.podTemplate.spec.resources",
+ "spec.verticalScaling.topology.overloads": "spec.topology.overloads.podTemplate.spec.resources",
+ "spec.verticalScaling.topology.middleManagers": "spec.topology.middleManagers.podTemplate.spec.resources",
+ "spec.verticalScaling.topology.historicals": "spec.topology.historicals.podTemplate.spec.resources",
+ "spec.verticalScaling.topology.brokers": "spec.topology.brokers.podTemplate.spec.resources",
+ "spec.verticalScaling.topology.routers": "spec.topology.routers.podTemplate.spec.resources",
+ }
+}
+
+func (m *DruidOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.topology.middleManagers": "spec.topology.middleManagers.podTemplate.spec.storage.resources.requests.storage",
+ "spec.volumeExpansion.topology.historicals": "spec.topology.historicals.podTemplate.spec.storage.resources.requests.storage",
+ }
+}
+
+func (m *DruidOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *DruidOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "DruidOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/ferretdb_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/ferretdb_mapping.go
new file mode 100644
index 000000000..d32f2edf5
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/ferretdb_mapping.go
@@ -0,0 +1,55 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&FerretDBOpsRequest{})
+}
+
+type FerretDBOpsRequest struct{}
+
+var _ OpsPathMapper = (*FerretDBOpsRequest)(nil)
+
+func (m *FerretDBOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.node": "spec.replicas",
+ }
+}
+
+func (m *FerretDBOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ }
+}
+
+func (m *FerretDBOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{}
+}
+
+func (m *FerretDBOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *FerretDBOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "FerretDBOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/mssqlserver_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/mssqlserver_mapping.go
new file mode 100644
index 000000000..55cb0d6fc
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/mssqlserver_mapping.go
@@ -0,0 +1,59 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&MSSQLServerOpsRequest{})
+}
+
+type MSSQLServerOpsRequest struct{}
+
+var _ OpsPathMapper = (*MSSQLServerOpsRequest)(nil)
+
+func (m *MSSQLServerOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.replicas": "spec.replicas",
+ }
+}
+
+func (m *MSSQLServerOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.mssqlserver": "spec.podTemplate.spec.resources",
+ "spec.verticalScaling.exporter": "spec.monitor.prometheus.exporter.resources",
+ "spec.verticalScaling.coordinator": "spec.coordinator.resources",
+ }
+}
+
+func (m *MSSQLServerOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.mssqlserver": "spec.storage.resources.requests.storage",
+ }
+}
+
+func (m *MSSQLServerOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *MSSQLServerOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "MSSQLServerOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/perconaxtradb_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/perconaxtradb_mapping.go
new file mode 100644
index 000000000..366fc415c
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/perconaxtradb_mapping.go
@@ -0,0 +1,59 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&PerconaXtraDBOpsRequest{})
+}
+
+type PerconaXtraDBOpsRequest struct{}
+
+var _ OpsPathMapper = (*PerconaXtraDBOpsRequest)(nil)
+
+func (m *PerconaXtraDBOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.member": "spec.replicas",
+ }
+}
+
+func (m *PerconaXtraDBOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.perconaxtradb": "spec.podTemplate.spec.resources",
+ "spec.verticalScaling.exporter": "spec.monitor.prometheus.exporter.resources",
+ "spec.verticalScaling.coordinator": "spec.coordinator.resources",
+ }
+}
+
+func (m *PerconaXtraDBOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.perconaxtradb": "spec.storage.resources.requests.storage",
+ }
+}
+
+func (m *PerconaXtraDBOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *PerconaXtraDBOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "PerconaXtraDBOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/pgpool_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/pgpool_mapping.go
new file mode 100644
index 000000000..534a33bf6
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/pgpool_mapping.go
@@ -0,0 +1,55 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&PgpoolOpsRequest{})
+}
+
+type PgpoolOpsRequest struct{}
+
+var _ OpsPathMapper = (*PgpoolOpsRequest)(nil)
+
+func (m *PgpoolOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.node": "spec.replicas",
+ }
+}
+
+func (m *PgpoolOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ }
+}
+
+func (m *PgpoolOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{}
+}
+
+func (m *PgpoolOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *PgpoolOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "PgpoolOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/proxysql_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/proxysql_mapping.go
index 3f2e17223..cbd2426f9 100644
--- a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/proxysql_mapping.go
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/proxysql_mapping.go
@@ -43,7 +43,7 @@ func (m *ProxySQLOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]Referen
}
func (m *ProxySQLOpsRequest) GetAppRefPath() []string {
- return []string{"spec", "proxyRef", "referencedDB"}
+ return []string{"spec", "proxyRef"}
}
func (m *ProxySQLOpsRequest) GroupVersionKind() schema.GroupVersionKind {
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/rabbitmq_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/rabbitmq_mapping.go
new file mode 100644
index 000000000..d66bb316a
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/rabbitmq_mapping.go
@@ -0,0 +1,57 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&RabbitMQOpsRequest{})
+}
+
+type RabbitMQOpsRequest struct{}
+
+var _ OpsPathMapper = (*RabbitMQOpsRequest)(nil)
+
+func (m *RabbitMQOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.node": "spec.replicas",
+ }
+}
+
+func (m *RabbitMQOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ }
+}
+
+func (m *RabbitMQOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.node": "spec.storage.resources.requests.storage",
+ }
+}
+
+func (m *RabbitMQOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *RabbitMQOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "RabbitMQOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/scale.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/scale.go
index 9a9467f0d..feb381a3c 100644
--- a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/scale.go
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/scale.go
@@ -18,9 +18,10 @@ package v1alpha1
import (
"errors"
- "fmt"
"strings"
+ "kmodules.xyz/resource-metrics/api"
+
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -69,7 +70,14 @@ func extractReferencedObject(opsObj map[string]interface{}, refDbPath ...string)
}
dbObj, found, _ := unstructured.NestedMap(opsObj, refDbPath...)
if !found {
- return nil, errors.New("referenced db object not found")
+ return nil, api.ErrMissingRefObject
+ }
+
+ _, foundApiVersion := dbObj["apiVersion"]
+ _, foundKind := dbObj["kind"]
+ _, foundMetadata := dbObj["metadata"]
+ if !foundApiVersion || !foundKind || !foundMetadata {
+ return nil, api.ErrMissingRefObject
}
return dbObj, nil
@@ -106,5 +114,5 @@ func getMapping(opsObj OpsReqObject, opsMapper OpsPathMapper) (map[OpsReqPath]Re
return opsMapper.VolumeExpansionPathMapping(), nil
}
- return nil, fmt.Errorf("scaling type `%s` not supported", scalingType)
+ return nil, nil
}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/singlestore_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/singlestore_mapping.go
new file mode 100644
index 000000000..b21c81aad
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/singlestore_mapping.go
@@ -0,0 +1,63 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&SinglestoreOpsRequest{})
+}
+
+type SinglestoreOpsRequest struct{}
+
+var _ OpsPathMapper = (*SinglestoreOpsRequest)(nil)
+
+func (m *SinglestoreOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.aggregator": "spec.topology.aggregator.replicas",
+ "spec.horizontalScaling.leaf": "spec.topology.leaf.replicas",
+ }
+}
+
+func (m *SinglestoreOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ "spec.verticalScaling.aggregator": "spec.topology.aggregator.podTemplate.spec.resources",
+ "spec.verticalScaling.leaf": "spec.topology.leaf.podTemplate.spec.resources",
+ "spec.verticalScaling.coordinator": "spec.coordinator.resources",
+ }
+}
+
+func (m *SinglestoreOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.node": "spec.storage.resources.requests.storage",
+ "spec.volumeExpansion.aggregator": "spec.topology.aggregator.storage.resources.requests.storage",
+ "spec.volumeExpansion.leaf": "spec.topology.leaf.storage.resources.requests.storage",
+ }
+}
+
+func (m *SinglestoreOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *SinglestoreOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "SinglestoreOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/solr_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/solr_mapping.go
new file mode 100644
index 000000000..9ae893d16
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/solr_mapping.go
@@ -0,0 +1,66 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&SolrOpsRequest{})
+}
+
+type SolrOpsRequest struct{}
+
+var _ OpsPathMapper = (*SolrOpsRequest)(nil)
+
+func (m *SolrOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.node": "spec.replicas",
+ "spec.horizontalScaling.data": "spec.topology.data.replicas",
+ "spec.horizontalScaling.overseer": "spec.topology.overseer.replicas",
+ "spec.horizontalScaling.coordinator": "spec.topology.coordinator.replicas",
+ }
+}
+
+func (m *SolrOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ "spec.verticalScaling.data": "spec.topology.data.podTemplate.spec.resources",
+ "spec.verticalScaling.overseer": "spec.topology.overseer.podTemplate.spec.resources",
+ "spec.verticalScaling.coordinator": "spec.topology.coordinator.podTemplate.spec.resources",
+ }
+}
+
+func (m *SolrOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.node": "spec.storage.resources.requests.storage",
+ "spec.volumeExpansion.data": "spec.topology.data.storage.resources.requests.storage",
+ "spec.volumeExpansion.overseer": "spec.topology.overseer.storage.resources.requests.storage",
+ "spec.volumeExpansion.coordinator": "spec.topology.coordinator.storage.resources.requests.storage",
+ }
+}
+
+func (m *SolrOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *SolrOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "SolrOpsRequest",
+ }
+}
diff --git a/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/zookeeper_mapping.go b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/zookeeper_mapping.go
new file mode 100644
index 000000000..f49cb28be
--- /dev/null
+++ b/vendor/kmodules.xyz/resource-metrics/ops.kubedb.com/v1alpha1/zookeeper_mapping.go
@@ -0,0 +1,57 @@
+/*
+Copyright AppsCode Inc. and Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+func init() {
+ RegisterOpsPathMapperToPlugins(&ZooKeeperOpsRequest{})
+}
+
+type ZooKeeperOpsRequest struct{}
+
+var _ OpsPathMapper = (*ZooKeeperOpsRequest)(nil)
+
+func (m *ZooKeeperOpsRequest) HorizontalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.horizontalScaling.replicas": "spec.replicas",
+ }
+}
+
+func (m *ZooKeeperOpsRequest) VerticalPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.verticalScaling.node": "spec.podTemplate.spec.resources",
+ }
+}
+
+func (m *ZooKeeperOpsRequest) VolumeExpansionPathMapping() map[OpsReqPath]ReferencedObjPath {
+ return map[OpsReqPath]ReferencedObjPath{
+ "spec.volumeExpansion.node": "spec.storage.resources.requests.storage",
+ }
+}
+
+func (m *ZooKeeperOpsRequest) GetAppRefPath() []string {
+ return []string{"spec", "databaseRef"}
+}
+
+func (m *ZooKeeperOpsRequest) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{
+ Group: "ops.kubedb.com",
+ Version: "v1alpha1",
+ Kind: "ZooKeeperOpsRequest",
+ }
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 57daf466a..1ac50ffbb 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -35,8 +35,8 @@ github.com/Azure/go-autorest/tracing
# github.com/Masterminds/goutils v1.1.1
## explicit
github.com/Masterminds/goutils
-# github.com/Masterminds/semver/v3 v3.2.1
-## explicit; go 1.18
+# github.com/Masterminds/semver/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/semver/v3
# github.com/Masterminds/sprig/v3 v3.2.3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8
## explicit; go 1.17
@@ -181,11 +181,11 @@ github.com/docker/docker-credential-helpers/credentials
# github.com/dustin/go-humanize v1.0.1
## explicit; go 1.16
github.com/dustin/go-humanize
-# github.com/emicklei/go-restful/v3 v3.11.0
+# github.com/emicklei/go-restful/v3 v3.12.1
## explicit; go 1.13
github.com/emicklei/go-restful/v3
github.com/emicklei/go-restful/v3/log
-# github.com/evanphx/json-patch v5.7.0+incompatible
+# github.com/evanphx/json-patch v5.9.0+incompatible
## explicit
github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.9.0
@@ -220,15 +220,15 @@ github.com/go-logr/logr/funcr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.20.0
-## explicit; go 1.18
+# github.com/go-openapi/jsonpointer v0.21.0
+## explicit; go 1.20
github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.20.2
-## explicit; go 1.13
+# github.com/go-openapi/jsonreference v0.21.0
+## explicit; go 1.20
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.22.4
-## explicit; go 1.18
+# github.com/go-openapi/swag v0.23.0
+## explicit; go 1.20
github.com/go-openapi/swag
# github.com/go-sql-driver/mysql v1.8.1
## explicit; go 1.18
@@ -390,7 +390,7 @@ github.com/grpc-ecosystem/go-grpc-prometheus
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
-# github.com/huandu/xstrings v1.4.0
+# github.com/huandu/xstrings v1.5.0
## explicit; go 1.12
github.com/huandu/xstrings
# github.com/imdario/mergo v0.3.16 => github.com/imdario/mergo v0.3.6
@@ -411,8 +411,8 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.2
-## explicit; go 1.18
+# github.com/klauspost/compress v1.17.9
+## explicit; go 1.20
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -443,9 +443,6 @@ github.com/liggitt/tabwriter
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
-# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0
-## explicit; go 1.19
-github.com/matttproud/golang_protobuf_extensions/v2/pbutil
# github.com/mitchellh/copystructure v1.2.0
## explicit; go 1.15
github.com/mitchellh/copystructure
@@ -481,13 +478,13 @@ github.com/munnerz/goautoneg
# github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
## explicit
github.com/mxk/go-flowrate/flowrate
-# github.com/nats-io/nats.go v1.36.0
-## explicit; go 1.20
+# github.com/nats-io/nats.go v1.39.0
+## explicit; go 1.22.0
github.com/nats-io/nats.go
github.com/nats-io/nats.go/encoders/builtin
github.com/nats-io/nats.go/internal/parser
github.com/nats-io/nats.go/util
-# github.com/nats-io/nkeys v0.4.7
+# github.com/nats-io/nkeys v0.4.9
## explicit; go 1.20
github.com/nats-io/nkeys
# github.com/nats-io/nuid v1.0.1
@@ -553,16 +550,16 @@ github.com/prometheus/client_golang/prometheus/push
github.com/prometheus/client_golang/prometheus/testutil
github.com/prometheus/client_golang/prometheus/testutil/promlint
github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
-# github.com/prometheus/client_model v0.5.0
+# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.45.0
+# github.com/prometheus/common v0.46.0
## explicit; go 1.20
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.12.0
-## explicit; go 1.19
+# github.com/prometheus/procfs v0.15.0
+## explicit; go 1.21
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -587,14 +584,14 @@ github.com/russross/blackfriday/v2
# github.com/sergi/go-diff v1.2.0
## explicit; go 1.12
github.com/sergi/go-diff/diffmatchpatch
-# github.com/shopspring/decimal v1.3.1
-## explicit; go 1.13
+# github.com/shopspring/decimal v1.4.0
+## explicit; go 1.10
github.com/shopspring/decimal
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
-# github.com/spf13/cast v1.5.0
-## explicit; go 1.18
+# github.com/spf13/cast v1.7.0
+## explicit; go 1.19
github.com/spf13/cast
# github.com/spf13/cobra v1.8.0
## explicit; go 1.15
@@ -622,24 +619,24 @@ github.com/yudai/golcs
# github.com/zeebo/xxh3 v1.0.2
## explicit; go 1.17
github.com/zeebo/xxh3
-# go.bytebuilders.dev/audit v0.0.38
+# go.bytebuilders.dev/audit v0.0.41
## explicit; go 1.22.1
go.bytebuilders.dev/audit/api/v1
go.bytebuilders.dev/audit/lib
-# go.bytebuilders.dev/license-proxyserver v0.0.19
+# go.bytebuilders.dev/license-proxyserver v0.0.20
## explicit; go 1.22.0
go.bytebuilders.dev/license-proxyserver/apis/proxyserver
go.bytebuilders.dev/license-proxyserver/apis/proxyserver/v1alpha1
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/scheme
go.bytebuilders.dev/license-proxyserver/client/clientset/versioned/typed/proxyserver/v1alpha1
-# go.bytebuilders.dev/license-verifier v0.14.4
+# go.bytebuilders.dev/license-verifier v0.14.6
## explicit; go 1.21
go.bytebuilders.dev/license-verifier
go.bytebuilders.dev/license-verifier/apis/licenses
go.bytebuilders.dev/license-verifier/apis/licenses/v1alpha1
go.bytebuilders.dev/license-verifier/info
-# go.bytebuilders.dev/license-verifier/kubernetes v0.14.4
+# go.bytebuilders.dev/license-verifier/kubernetes v0.14.6
## explicit; go 1.22.0
go.bytebuilders.dev/license-verifier/kubernetes
# go.etcd.io/etcd/api/v3 v3.5.10
@@ -786,7 +783,6 @@ golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/cryptobyte
golang.org/x/crypto/cryptobyte/asn1
golang.org/x/crypto/curve25519
-golang.org/x/crypto/ed25519
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
@@ -797,7 +793,7 @@ golang.org/x/crypto/pkcs12
golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/scrypt
-# golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
@@ -1043,8 +1039,9 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.33.0
-## explicit; go 1.17
+# google.golang.org/protobuf v1.34.2
+## explicit; go 1.20
+google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -1052,6 +1049,7 @@ google.golang.org/protobuf/internal/descfmt
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/editiondefaults
+google.golang.org/protobuf/internal/editionssupport
google.golang.org/protobuf/internal/encoding/defval
google.golang.org/protobuf/internal/encoding/json
google.golang.org/protobuf/internal/encoding/messageset
@@ -1921,13 +1919,13 @@ kmodules.xyz/prober/probe
kmodules.xyz/prober/probe/exec
kmodules.xyz/prober/probe/http
kmodules.xyz/prober/probe/tcp
-# kmodules.xyz/resource-metadata v0.18.12
+# kmodules.xyz/resource-metadata v0.24.3
## explicit; go 1.22.1
kmodules.xyz/resource-metadata/apis/core/v1alpha1
kmodules.xyz/resource-metadata/apis/identity/v1alpha1
kmodules.xyz/resource-metadata/apis/shared
kmodules.xyz/resource-metadata/pkg/identity
-# kmodules.xyz/resource-metrics v0.30.2
+# kmodules.xyz/resource-metrics v0.30.5
## explicit; go 1.22.1
kmodules.xyz/resource-metrics
kmodules.xyz/resource-metrics/api
@@ -1950,6 +1948,9 @@ kmodules.xyz/webhook-runtime/apis/workload/v1
kmodules.xyz/webhook-runtime/client/workload/v1
kmodules.xyz/webhook-runtime/registry/admissionreview/v1beta1
kmodules.xyz/webhook-runtime/runtime/serializer/versioning
+# moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7
+## explicit; go 1.13
+moul.io/http2curl/v2
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0
## explicit; go 1.20
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
@@ -2100,8 +2101,8 @@ sigs.k8s.io/structured-merge-diff/v4/value
## explicit; go 1.12
sigs.k8s.io/yaml
sigs.k8s.io/yaml/goyaml.v2
-# stash.appscode.dev/apimachinery v0.38.1-0.20250114050236-cca8469a4c04
-## explicit; go 1.22.0
+# stash.appscode.dev/apimachinery v0.39.0
+## explicit; go 1.22.1
stash.appscode.dev/apimachinery/apis
stash.appscode.dev/apimachinery/apis/repositories
stash.appscode.dev/apimachinery/apis/repositories/install
diff --git a/vendor/moul.io/http2curl/v2/.gitattributes b/vendor/moul.io/http2curl/v2/.gitattributes
new file mode 100644
index 000000000..9a0d71ac4
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/.gitattributes
@@ -0,0 +1,12 @@
+# Auto detect text files and perform LF normalization
+* text=auto
+
+# Collapse vendored and generated files on GitHub
+vendor/* linguist-vendored
+rules.mk linguist-vendored
+*/vendor/* linguist-vendored
+*.gen.* linguist-generated
+*.pb.go linguist-generated
+
+# Reduce conflicts on markdown files
+*.md merge=union
diff --git a/vendor/moul.io/http2curl/v2/.gitignore b/vendor/moul.io/http2curl/v2/.gitignore
new file mode 100644
index 000000000..6f74a6986
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/.gitignore
@@ -0,0 +1,28 @@
+# Temporary files
+*~
+*#
+.#*
+coverage.txt
+
+# Vendors
+package-lock.json
+node_modules/
+vendor/
+
+# Binaries for programs and plugins
+dist/
+gin-bin
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# IDE settings
+.idea
\ No newline at end of file
diff --git a/vendor/moul.io/http2curl/v2/.releaserc.js b/vendor/moul.io/http2curl/v2/.releaserc.js
new file mode 100644
index 000000000..566642fc0
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/.releaserc.js
@@ -0,0 +1,8 @@
+module.exports = {
+ branch: 'master',
+ plugins: [
+ '@semantic-release/commit-analyzer',
+ '@semantic-release/release-notes-generator',
+ '@semantic-release/github',
+ ],
+};
diff --git a/vendor/moul.io/http2curl/v2/AUTHORS b/vendor/moul.io/http2curl/v2/AUTHORS
new file mode 100644
index 000000000..31e7bebb4
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/AUTHORS
@@ -0,0 +1,15 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see 'https://github.com/moul/rules.mk'
+
+Darko Djalevski
+Drew LeSueur
+Elliot Chance
+Hani Zakher
+Jay Taylor
+Manfred Touron <94029+moul@users.noreply.github.com>
+Manfred Touron
+psaffrey
+Quentin Perez
+Renovate Bot
+Richardson Lima
+Th3Whit3D3ath <49405908+Th3Whit3D3ath@users.noreply.github.com>
diff --git a/vendor/moul.io/http2curl/v2/COPYRIGHT b/vendor/moul.io/http2curl/v2/COPYRIGHT
new file mode 100644
index 000000000..db9d0bb0d
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/COPYRIGHT
@@ -0,0 +1,22 @@
+Copyright 2015-2021 Manfred Touron and other http2curl Developers.
+
+Intellectual Property Notice
+----------------------------
+
+http2curl is licensed under the Apache License, Version 2.0
+(see LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) or
+the MIT license (see LICENSE-MIT or http://opensource.org/licenses/MIT),
+at your option.
+
+Copyrights and patents in the http2curl project are retained
+by contributors.
+No copyright assignment is required to contribute to http2curl.
+
+ SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+SPDX usage
+----------
+
+Individual files may contain SPDX tags instead of the full license text.
+This enables machine processing of license information based on the SPDX
+License Identifiers that are available here: https://spdx.org/licenses/
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/moul.io/http2curl/v2/LICENSE-APACHE
similarity index 99%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE
rename to vendor/moul.io/http2curl/v2/LICENSE-APACHE
index 8dada3eda..55b841758 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE
+++ b/vendor/moul.io/http2curl/v2/LICENSE-APACHE
@@ -178,7 +178,7 @@
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
+ boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright {yyyy} {name of copyright owner}
+ Copyright 2015-2021 Manfred Touron (manfred.life)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/moul.io/http2curl/v2/LICENSE-MIT b/vendor/moul.io/http2curl/v2/LICENSE-MIT
new file mode 100644
index 000000000..b6bc77722
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2015-2021 Manfred Touron (manfred.life)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/moul.io/http2curl/v2/Makefile b/vendor/moul.io/http2curl/v2/Makefile
new file mode 100644
index 000000000..52b5be5b8
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/Makefile
@@ -0,0 +1,5 @@
+GOPKG ?= moul.io/http2curl
+
+all: test
+
+include rules.mk
diff --git a/vendor/moul.io/http2curl/v2/README.md b/vendor/moul.io/http2curl/v2/README.md
new file mode 100644
index 000000000..971880826
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/README.md
@@ -0,0 +1,59 @@
+# http2curl
+
+:triangular_ruler: Convert Golang's http.Request to CURL command line
+
+[GoDoc](https://pkg.go.dev/moul.io/http2curl)
+[License](https://github.com/moul/http2curl/blob/master/COPYRIGHT)
+[GitHub release](https://github.com/moul/http2curl/releases)
+[Docker Metrics](https://microbadger.com/images/moul/http2curl)
+[Made by Manfred Touron](https://manfred.life/)
+
+[Go workflow](https://github.com/moul/http2curl/actions?query=workflow%3AGo)
+[Release workflow](https://github.com/moul/http2curl/actions?query=workflow%3ARelease)
+[PR workflow](https://github.com/moul/http2curl/actions?query=workflow%3APR)
+[GolangCI](https://golangci.com/r/github.com/moul/http2curl)
+[codecov](https://codecov.io/gh/moul/http2curl)
+[Go Report Card](https://goreportcard.com/report/moul.io/http2curl)
+[CodeFactor](https://www.codefactor.io/repository/github/moul/http2curl)
+
+To do the reverse operation, check out [mholt/curl-to-go](https://github.com/mholt/curl-to-go).
+
+## Example
+
+```go
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+
+	"moul.io/http2curl"
+)
+
+data := bytes.NewBufferString(`{"hello":"world","answer":42}`)
+req, _ := http.NewRequest("PUT", "http://www.example.com/abc/def.ghi?jlk=mno&pqr=stu", data)
+req.Header.Set("Content-Type", "application/json")
+
+command, _ := http2curl.GetCurlCommand(req)
+fmt.Println(command)
+// Output: curl -X 'PUT' -d '{"hello":"world","answer":42}' -H 'Content-Type: application/json' 'http://www.example.com/abc/def.ghi?jlk=mno&pqr=stu' --compressed
+```
+
+## Install
+
+```bash
+go get moul.io/http2curl
+```
+
+## Usages
+
+- https://github.com/parnurzeal/gorequest
+- https://github.com/scaleway/scaleway-cli
+- https://github.com/nmonterroso/cowsay-slackapp
+- https://github.com/moul/as-a-service
+- https://github.com/gavv/httpexpect
+- https://github.com/smallnest/goreq
+
+## License
+
+© 2019-2021 [Manfred Touron](https://manfred.life)
+
+Licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) ([`LICENSE-APACHE`](LICENSE-APACHE)) or the [MIT license](https://opensource.org/licenses/MIT) ([`LICENSE-MIT`](LICENSE-MIT)), at your option. See the [`COPYRIGHT`](COPYRIGHT) file for more details.
+
+`SPDX-License-Identifier: (Apache-2.0 OR MIT)`
diff --git a/vendor/moul.io/http2curl/v2/doc.go b/vendor/moul.io/http2curl/v2/doc.go
new file mode 100644
index 000000000..6820facab
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/doc.go
@@ -0,0 +1 @@
+package http2curl
diff --git a/vendor/moul.io/http2curl/v2/http2curl.go b/vendor/moul.io/http2curl/v2/http2curl.go
new file mode 100644
index 000000000..df21c1936
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/http2curl.go
@@ -0,0 +1,85 @@
+package http2curl
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "strings"
+)
+
+// CurlCommand contains exec.Command compatible slice + helpers
+type CurlCommand []string
+
+// append appends a string to the CurlCommand
+func (c *CurlCommand) append(newSlice ...string) {
+ *c = append(*c, newSlice...)
+}
+
+// String returns a ready to copy/paste command
+func (c *CurlCommand) String() string {
+ return strings.Join(*c, " ")
+}
+
+func bashEscape(str string) string {
+ return `'` + strings.Replace(str, `'`, `'\''`, -1) + `'`
+}
+
+// GetCurlCommand returns a CurlCommand corresponding to an http.Request
+func GetCurlCommand(req *http.Request) (*CurlCommand, error) {
+ if req.URL == nil {
+ return nil, fmt.Errorf("getCurlCommand: invalid request, req.URL is nil")
+ }
+
+ command := CurlCommand{}
+
+ command.append("curl")
+
+ schema := req.URL.Scheme
+ requestURL := req.URL.String()
+ if schema == "" {
+ schema = "http"
+ if req.TLS != nil {
+ schema = "https"
+ }
+ requestURL = schema + "://" + req.Host + req.URL.Path
+ }
+
+ if schema == "https" {
+ command.append("-k")
+ }
+
+ command.append("-X", bashEscape(req.Method))
+
+ if req.Body != nil {
+ var buff bytes.Buffer
+ _, err := buff.ReadFrom(req.Body)
+ if err != nil {
+ return nil, fmt.Errorf("getCurlCommand: buffer read from body error: %w", err)
+ }
+ // reset body for potential re-reads
+ req.Body = ioutil.NopCloser(bytes.NewBuffer(buff.Bytes()))
+ if len(buff.String()) > 0 {
+ bodyEscaped := bashEscape(buff.String())
+ command.append("-d", bodyEscaped)
+ }
+ }
+
+ var keys []string
+
+ for k := range req.Header {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ command.append("-H", bashEscape(fmt.Sprintf("%s: %s", k, strings.Join(req.Header[k], " "))))
+ }
+
+ command.append(bashEscape(requestURL))
+
+ command.append("--compressed")
+
+ return &command, nil
+}
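Reviewer note: below is a minimal, hypothetical sketch (not part of this diff) of how a consumer might wire the newly vendored `GetCurlCommand` helper into an `http.RoundTripper` so every outgoing request is logged as its curl equivalent. The `curlLoggingTransport` type and the example URL are illustrative only; only `GetCurlCommand` and `CurlCommand.String` come from the vendored package.

```go
package main

import (
	"fmt"
	"net/http"

	"moul.io/http2curl/v2"
)

// curlLoggingTransport is a hypothetical RoundTripper that prints the curl
// equivalent of every outgoing request before delegating to the wrapped
// transport.
type curlLoggingTransport struct {
	next http.RoundTripper
}

func (t curlLoggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// GetCurlCommand buffers and restores req.Body, so the request can still
	// be sent by the underlying transport afterwards.
	if cmd, err := http2curl.GetCurlCommand(req); err == nil {
		fmt.Println(cmd.String())
	}
	return t.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: curlLoggingTransport{next: http.DefaultTransport}}
	resp, err := client.Get("https://example.com/")
	if err == nil {
		resp.Body.Close()
	}
}
```

Because the helper re-wraps `req.Body` with a NopCloser after reading it, logging the command does not consume the request body.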
diff --git a/vendor/moul.io/http2curl/v2/rules.mk b/vendor/moul.io/http2curl/v2/rules.mk
new file mode 100644
index 000000000..cc1fe6177
--- /dev/null
+++ b/vendor/moul.io/http2curl/v2/rules.mk
@@ -0,0 +1,361 @@
+# +--------------------------------------------------------------+
+# | * * * moul.io/rules.mk |
+# +--------------------------------------------------------------+
+# | |
+# | ++ ______________________________________ |
+# | ++++ / \ |
+# | ++++ | | |
+# | ++++++++++ | https://moul.io/rules.mk is a set | |
+# | +++ | | of common Makefile rules that can | |
+# | ++ | | be configured from the Makefile | |
+# | + -== ==| | or with environment variables. | |
+# | ( <*> <*> | | |
+# | | | /| Manfred Touron | |
+# | | _) / | manfred.life | |
+# | | +++ / \______________________________________/ |
+# | \ =+ / |
+# | \ + |
+# | |\++++++ |
+# | | ++++ ||// |
+# | ___| |___ _||/__ __|
+# | / --- \ \| ||| __ _ ___ __ __/ /|
+# |/ | | \ \ / / ' \/ _ \/ // / / |
+# || | | | | | /_/_/_/\___/\_,_/_/ |
+# +--------------------------------------------------------------+
+
+.PHONY: _default_entrypoint
+_default_entrypoint: help
+
+##
+## Common helpers
+##
+
+rwildcard = $(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
+check-program = $(foreach exec,$(1),$(if $(shell PATH="$(PATH)" which $(exec)),,$(error "No $(exec) in PATH")))
+my-filter-out = $(foreach v,$(2),$(if $(findstring $(1),$(v)),,$(v)))
+novendor = $(call my-filter-out,vendor/,$(1))
+
+##
+## rules.mk
+##
+ifneq ($(wildcard rules.mk),)
+.PHONY: rulesmk.bumpdeps
+rulesmk.bumpdeps:
+ wget -O rules.mk https://raw.githubusercontent.com/moul/rules.mk/master/rules.mk
+BUMPDEPS_STEPS += rulesmk.bumpdeps
+endif
+
+##
+## Maintainer
+##
+
+ifneq ($(wildcard .git/HEAD),)
+.PHONY: generate.authors
+generate.authors: AUTHORS
+AUTHORS: .git/
+ echo "# This file lists all individuals having contributed content to the repository." > AUTHORS
+ echo "# For how it is generated, see 'https://github.com/moul/rules.mk'" >> AUTHORS
+ echo >> AUTHORS
+ git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf >> AUTHORS
+GENERATE_STEPS += generate.authors
+endif
+
+##
+## Golang
+##
+
+ifndef GOPKG
+ifneq ($(wildcard go.mod),)
+GOPKG = $(shell sed '/module/!d;s/^module\ //' go.mod)
+endif
+endif
+ifdef GOPKG
+GO ?= go
+GOPATH ?= $(HOME)/go
+GO_INSTALL_OPTS ?=
+GO_TEST_OPTS ?= -test.timeout=30s
+GOMOD_DIRS ?= $(sort $(call novendor,$(dir $(call rwildcard,*,*/go.mod go.mod))))
+GOCOVERAGE_FILE ?= ./coverage.txt
+GOTESTJSON_FILE ?= ./go-test.json
+GOBUILDLOG_FILE ?= ./go-build.log
+GOINSTALLLOG_FILE ?= ./go-install.log
+
+ifdef GOBINS
+.PHONY: go.install
+go.install:
+ifeq ($(CI),true)
+ @rm -f /tmp/goinstall.log
+ @set -e; for dir in $(GOBINS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) install -v $(GO_INSTALL_OPTS) .; \
+ ); done 2>&1 | tee $(GOINSTALLLOG_FILE)
+
+else
+ @set -e; for dir in $(GOBINS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) install $(GO_INSTALL_OPTS) .; \
+ ); done
+endif
+INSTALL_STEPS += go.install
+
+.PHONY: go.release
+go.release:
+ $(call check-program, goreleaser)
+ goreleaser --snapshot --skip-publish --rm-dist
+ @echo -n "Do you want to release? [y/N] " && read ans && \
+ if [ $${ans:-N} = y ]; then set -xe; goreleaser --rm-dist; fi
+RELEASE_STEPS += go.release
+endif
+
+.PHONY: go.unittest
+go.unittest:
+ifeq ($(CI),true)
+ @echo "mode: atomic" > /tmp/gocoverage
+ @rm -f $(GOTESTJSON_FILE)
+ @set -e; for dir in $(GOMOD_DIRS); do (set -e; (set -euf pipefail; \
+ cd $$dir; \
+ (($(GO) test ./... $(GO_TEST_OPTS) -cover -coverprofile=/tmp/profile.out -covermode=atomic -race -json && touch $@.ok) | tee -a $(GOTESTJSON_FILE) 3>&1 1>&2 2>&3 | tee -a $(GOBUILDLOG_FILE); \
+ ); \
+ rm $@.ok 2>/dev/null || exit 1; \
+ if [ -f /tmp/profile.out ]; then \
+ cat /tmp/profile.out | sed "/mode: atomic/d" >> /tmp/gocoverage; \
+ rm -f /tmp/profile.out; \
+ fi)); done
+ @mv /tmp/gocoverage $(GOCOVERAGE_FILE)
+else
+ @echo "mode: atomic" > /tmp/gocoverage
+ @set -e; for dir in $(GOMOD_DIRS); do (set -e; (set -xe; \
+ cd $$dir; \
+ $(GO) test ./... $(GO_TEST_OPTS) -cover -coverprofile=/tmp/profile.out -covermode=atomic -race); \
+ if [ -f /tmp/profile.out ]; then \
+ cat /tmp/profile.out | sed "/mode: atomic/d" >> /tmp/gocoverage; \
+ rm -f /tmp/profile.out; \
+ fi); done
+ @mv /tmp/gocoverage $(GOCOVERAGE_FILE)
+endif
+
+.PHONY: go.checkdoc
+go.checkdoc:
+	go doc $(firstword $(GOMOD_DIRS))
+
+.PHONY: go.coverfunc
+go.coverfunc: go.unittest
+ go tool cover -func=$(GOCOVERAGE_FILE) | grep -v .pb.go: | grep -v .pb.gw.go:
+
+.PHONY: go.lint
+go.lint:
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ golangci-lint run --verbose ./...; \
+ ); done
+
+.PHONY: go.tidy
+go.tidy:
+ @# tidy dirs with go.mod files
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) mod tidy; \
+ ); done
+
+.PHONY: go.depaware-update
+go.depaware-update: go.tidy
+ @# gen depaware for bins
+ @set -e; for dir in $(GOBINS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) run github.com/tailscale/depaware --update .; \
+ ); done
+ @# tidy unused depaware deps if not in a tools_test.go file
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) mod tidy; \
+ ); done
+
+.PHONY: go.depaware-check
+go.depaware-check: go.tidy
+ @# gen depaware for bins
+ @set -e; for dir in $(GOBINS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) run github.com/tailscale/depaware --check .; \
+ ); done
+
+
+.PHONY: go.build
+go.build:
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) build ./...; \
+ ); done
+
+.PHONY: go.bumpdeps
+go.bumpdeps:
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) get -u ./...; \
+ ); done
+
+.PHONY: go.fmt
+go.fmt:
+ @set -e; for dir in $(GOMOD_DIRS); do ( set -xe; \
+ cd $$dir; \
+ $(GO) run golang.org/x/tools/cmd/goimports -w `go list -f '{{.Dir}}' ./...` \
+ ); done
+
+VERIFY_STEPS += go.depaware-check
+BUILD_STEPS += go.build
+BUMPDEPS_STEPS += go.bumpdeps go.depaware-update
+TIDY_STEPS += go.tidy
+LINT_STEPS += go.lint
+UNITTEST_STEPS += go.unittest
+FMT_STEPS += go.fmt
+
+# FIXME: disabled, because currently slow
+# new rule that is manually run sometimes, i.e. `make pre-release` or `make maintenance`.
+# alternative: run it each time the go.mod is changed
+#GENERATE_STEPS += go.depaware-update
+endif
+
+##
+## Gitattributes
+##
+
+ifneq ($(wildcard .gitattributes),)
+.PHONY: _linguist-kept
+_linguist-kept:
+ @git check-attr linguist-vendored $(shell git check-attr linguist-generated $(shell find . -type f | grep -v .git/) | grep unspecified | cut -d: -f1) | grep unspecified | cut -d: -f1 | sort
+
+.PHONY: _linguist-ignored
+_linguist-ignored:
+ @git check-attr linguist-vendored linguist-ignored `find . -not -path './.git/*' -type f` | grep '\ set$$' | cut -d: -f1 | sort -u
+endif
+
+##
+## Node
+##
+
+ifndef NPM_PACKAGES
+ifneq ($(wildcard package.json),)
+NPM_PACKAGES = .
+endif
+endif
+ifdef NPM_PACKAGES
+.PHONY: npm.publish
+npm.publish:
+ @echo -n "Do you want to npm publish? [y/N] " && read ans && \
+	if [ $${ans:-N} = y ]; then \
+ set -e; for dir in $(NPM_PACKAGES); do ( set -xe; \
+ cd $$dir; \
+ npm publish --access=public; \
+ ); done; \
+ fi
+RELEASE_STEPS += npm.publish
+endif
+
+##
+## Docker
+##
+
+docker_build = docker build \
+ --build-arg VCS_REF=`git rev-parse --short HEAD` \
+ --build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` \
+ --build-arg VERSION=`git describe --tags --always` \
+ -t "$2" -f "$1" "$(dir $1)"
+
+ifndef DOCKERFILE_PATH
+DOCKERFILE_PATH = ./Dockerfile
+endif
+ifndef DOCKER_IMAGE
+ifneq ($(wildcard Dockerfile),)
+DOCKER_IMAGE = $(notdir $(PWD))
+endif
+endif
+ifdef DOCKER_IMAGE
+ifneq ($(DOCKER_IMAGE),none)
+.PHONY: docker.build
+docker.build:
+ $(call check-program, docker)
+ $(call docker_build,$(DOCKERFILE_PATH),$(DOCKER_IMAGE))
+
+BUILD_STEPS += docker.build
+endif
+endif
+
+##
+## Common
+##
+
+TEST_STEPS += $(UNITTEST_STEPS)
+TEST_STEPS += $(LINT_STEPS)
+TEST_STEPS += $(TIDY_STEPS)
+
+ifneq ($(strip $(TEST_STEPS)),)
+.PHONY: test
+test: $(PRE_TEST_STEPS) $(TEST_STEPS)
+endif
+
+ifdef INSTALL_STEPS
+.PHONY: install
+install: $(PRE_INSTALL_STEPS) $(INSTALL_STEPS)
+endif
+
+ifdef UNITTEST_STEPS
+.PHONY: unittest
+unittest: $(PRE_UNITTEST_STEPS) $(UNITTEST_STEPS)
+endif
+
+ifdef LINT_STEPS
+.PHONY: lint
+lint: $(PRE_LINT_STEPS) $(FMT_STEPS) $(LINT_STEPS)
+endif
+
+ifdef TIDY_STEPS
+.PHONY: tidy
+tidy: $(PRE_TIDY_STEPS) $(TIDY_STEPS)
+endif
+
+ifdef BUILD_STEPS
+.PHONY: build
+build: $(PRE_BUILD_STEPS) $(BUILD_STEPS)
+endif
+
+ifdef VERIFY_STEPS
+.PHONY: verify
+verify: $(PRE_VERIFY_STEPS) $(VERIFY_STEPS)
+endif
+
+ifdef RELEASE_STEPS
+.PHONY: release
+release: $(PRE_RELEASE_STEPS) $(RELEASE_STEPS)
+endif
+
+ifdef BUMPDEPS_STEPS
+.PHONY: bumpdeps
+bumpdeps: $(PRE_BUMPDEPS_STEPS) $(BUMPDEPS_STEPS)
+endif
+
+ifdef FMT_STEPS
+.PHONY: fmt
+fmt: $(PRE_FMT_STEPS) $(FMT_STEPS)
+endif
+
+ifdef GENERATE_STEPS
+.PHONY: generate
+generate: $(PRE_GENERATE_STEPS) $(GENERATE_STEPS)
+endif
+
+.PHONY: help
+help::
+ @echo "General commands:"
+ @[ "$(BUILD_STEPS)" != "" ] && echo " build" || true
+ @[ "$(BUMPDEPS_STEPS)" != "" ] && echo " bumpdeps" || true
+ @[ "$(FMT_STEPS)" != "" ] && echo " fmt" || true
+ @[ "$(GENERATE_STEPS)" != "" ] && echo " generate" || true
+ @[ "$(INSTALL_STEPS)" != "" ] && echo " install" || true
+ @[ "$(LINT_STEPS)" != "" ] && echo " lint" || true
+ @[ "$(RELEASE_STEPS)" != "" ] && echo " release" || true
+ @[ "$(TEST_STEPS)" != "" ] && echo " test" || true
+ @[ "$(TIDY_STEPS)" != "" ] && echo " tidy" || true
+ @[ "$(UNITTEST_STEPS)" != "" ] && echo " unittest" || true
+ @[ "$(VERIFY_STEPS)" != "" ] && echo " verify" || true
+ @# FIXME: list other commands
+
+print-% : ; $(info $* is a $(flavor $*) variable set to [$($*)]) @true
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1alpha1/util/repository.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1alpha1/util/repository.go
index aaf30712d..3794cae92 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1alpha1/util/repository.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1alpha1/util/repository.go
@@ -94,7 +94,6 @@ func TryUpdateRepository(ctx context.Context, c cs.StashV1alpha1Interface, meta
klog.Errorf("Attempt %d failed to update Repository %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update Repository %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateRepositoryStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of Repository %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupbatch.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupbatch.go
index 2c85e25f3..afcd0849b 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupbatch.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupbatch.go
@@ -94,7 +94,6 @@ func TryUpdateBackupBatch(ctx context.Context, c cs.StashV1beta1Interface, meta
klog.Errorf("Attempt %d failed to update BackupBatch %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update BackupBatch %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateBackupBatchStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of BackupBatch %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupblueprint.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupblueprint.go
index cb6b14d73..6ba792a53 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupblueprint.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupblueprint.go
@@ -94,7 +94,6 @@ func TryUpdateBackupBlueprint(ctx context.Context, c cs.StashV1beta1Interface, m
klog.Errorf("Attempt %d failed to update BackupBlueprint %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update BackupBlueprint %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupconfiguration.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupconfiguration.go
index 508033b29..b925d5308 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupconfiguration.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupconfiguration.go
@@ -94,7 +94,6 @@ func TryUpdateBackupConfiguration(ctx context.Context, c cs.StashV1beta1Interfac
klog.Errorf("Attempt %d failed to update BackupConfiguration %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update BackupConfiguration %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateBackupConfigurationStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of BackupConfiguration %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupsession.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupsession.go
index 0364f7a2a..c547b8425 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupsession.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/backupsession.go
@@ -94,7 +94,6 @@ func TryUpdateBackupSession(ctx context.Context, c cs.StashV1beta1Interface, met
klog.Errorf("Attempt %d failed to update BackupSession %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update BackupSession %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateBackupSessionStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of BackupSession %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/function.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/function.go
index 2398e7ef4..06050c924 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/function.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/function.go
@@ -94,7 +94,6 @@ func TryUpdateFunction(ctx context.Context, c cs.StashV1beta1Interface, meta met
klog.Errorf("Attempt %d failed to update Function %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update Function %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restorebatch.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restorebatch.go
index cc09c319b..4c32fe50f 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restorebatch.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restorebatch.go
@@ -94,7 +94,6 @@ func TryUpdateRestoreBatch(ctx context.Context, c cs.StashV1beta1Interface, meta
klog.Errorf("Attempt %d failed to update RestoreBatch %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update RestoreBatch %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateRestoreBatchStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of RestoreBatch %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restoresession.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restoresession.go
index d514555d7..ab547c4d6 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restoresession.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/restoresession.go
@@ -94,7 +94,6 @@ func TryUpdateRestoreSession(ctx context.Context, c cs.StashV1beta1Interface, me
klog.Errorf("Attempt %d failed to update RestoreSession %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update RestoreSession %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
@@ -148,7 +147,6 @@ func UpdateRestoreSessionStatus(
}
return e2 == nil, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update status of RestoreSession %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
diff --git a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/task.go b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/task.go
index ae45bbe56..a27d54914 100644
--- a/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/task.go
+++ b/vendor/stash.appscode.dev/apimachinery/client/clientset/versioned/typed/stash/v1beta1/util/task.go
@@ -94,7 +94,6 @@ func TryUpdateTask(ctx context.Context, c cs.StashV1beta1Interface, meta metav1.
klog.Errorf("Attempt %d failed to update Task %s/%s due to %v.", attempt, cur.Namespace, cur.Name, e2)
return false, nil
})
-
if err != nil {
err = fmt.Errorf("failed to update Task %s/%s after %d attempts due to %v", meta.Namespace, meta.Name, attempt, err)
}
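Reviewer note: the stash.appscode.dev/apimachinery hunks above are whitespace-only; each one drops a blank line between the retry loop and the error wrapping in a TryUpdate*/Update*Status helper. For context, the shared shape of those helpers is roughly the sketch below. It is a simplified illustration under assumptions: retryUpdate, the update callback, and the interval/timeout values are stand-ins, not the actual Stash code.

```go
package retryexample

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// retryUpdate mimics the structure of the TryUpdate*/Update*Status helpers:
// keep retrying an update until it succeeds or the timeout expires, then
// wrap the final error with the number of attempts made.
func retryUpdate(ctx context.Context, update func() error) error {
	attempt := 0
	err := wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
		attempt++
		if e := update(); e != nil {
			// Log and retry, mirroring the klog.Errorf calls in the hunks above.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		err = fmt.Errorf("failed to update after %d attempts due to %v", attempt, err)
	}
	return err
}
```

The attempt counter is captured by the closure so the final wrapped error can report how many tries were made, which matches the fmt.Errorf calls visible in the context lines of these hunks.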